qemu/target/i386/kvm.c
   1/*
   2 * QEMU KVM support
   3 *
   4 * Copyright (C) 2006-2008 Qumranet Technologies
   5 * Copyright IBM, Corp. 2008
   6 *
   7 * Authors:
   8 *  Anthony Liguori   <aliguori@us.ibm.com>
   9 *
  10 * This work is licensed under the terms of the GNU GPL, version 2 or later.
  11 * See the COPYING file in the top-level directory.
  12 *
  13 */
  14
  15#include "qemu/osdep.h"
  16#include "qapi/error.h"
  17#include <sys/ioctl.h>
  18#include <sys/utsname.h>
  19
  20#include <linux/kvm.h>
  21#include "standard-headers/asm-x86/kvm_para.h"
  22
  23#include "cpu.h"
  24#include "sysemu/sysemu.h"
  25#include "sysemu/hw_accel.h"
  26#include "sysemu/kvm_int.h"
  27#include "sysemu/reset.h"
  28#include "sysemu/runstate.h"
  29#include "kvm_i386.h"
  30#include "hyperv.h"
  31#include "hyperv-proto.h"
  32
  33#include "exec/gdbstub.h"
  34#include "qemu/host-utils.h"
  35#include "qemu/main-loop.h"
  36#include "qemu/config-file.h"
  37#include "qemu/error-report.h"
  38#include "hw/i386/x86.h"
  39#include "hw/i386/apic.h"
  40#include "hw/i386/apic_internal.h"
  41#include "hw/i386/apic-msidef.h"
  42#include "hw/i386/intel_iommu.h"
  43#include "hw/i386/x86-iommu.h"
  44#include "hw/i386/e820_memory_layout.h"
  45
  46#include "hw/pci/pci.h"
  47#include "hw/pci/msi.h"
  48#include "hw/pci/msix.h"
  49#include "migration/blocker.h"
  50#include "exec/memattrs.h"
  51#include "trace.h"
  52
  53//#define DEBUG_KVM
  54
  55#ifdef DEBUG_KVM
  56#define DPRINTF(fmt, ...) \
  57    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
  58#else
  59#define DPRINTF(fmt, ...) \
  60    do { } while (0)
  61#endif
  62
  63#define MSR_KVM_WALL_CLOCK  0x11
  64#define MSR_KVM_SYSTEM_TIME 0x12
  65
  66/* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
  67 * 255 kvm_msr_entry structs */
  68#define MSR_BUF_SIZE 4096
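/* For reference: struct kvm_msrs is an 8-byte header and each
 * struct kvm_msr_entry is 16 bytes, so 8 + 255 * 16 = 4088 bytes,
 * which fits in the 4096-byte buffer sized above.
 */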
  69
  70static void kvm_init_msrs(X86CPU *cpu);
  71
  72const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
  73    KVM_CAP_INFO(SET_TSS_ADDR),
  74    KVM_CAP_INFO(EXT_CPUID),
  75    KVM_CAP_INFO(MP_STATE),
  76    KVM_CAP_LAST_INFO
  77};
  78
  79static bool has_msr_star;
  80static bool has_msr_hsave_pa;
  81static bool has_msr_tsc_aux;
  82static bool has_msr_tsc_adjust;
  83static bool has_msr_tsc_deadline;
  84static bool has_msr_feature_control;
  85static bool has_msr_misc_enable;
  86static bool has_msr_smbase;
  87static bool has_msr_bndcfgs;
  88static int lm_capable_kernel;
  89static bool has_msr_hv_hypercall;
  90static bool has_msr_hv_crash;
  91static bool has_msr_hv_reset;
  92static bool has_msr_hv_vpindex;
  93static bool hv_vpindex_settable;
  94static bool has_msr_hv_runtime;
  95static bool has_msr_hv_synic;
  96static bool has_msr_hv_stimer;
  97static bool has_msr_hv_frequencies;
  98static bool has_msr_hv_reenlightenment;
  99static bool has_msr_xss;
 100static bool has_msr_umwait;
 101static bool has_msr_spec_ctrl;
 102static bool has_msr_tsx_ctrl;
 103static bool has_msr_virt_ssbd;
 104static bool has_msr_smi_count;
 105static bool has_msr_arch_capabs;
 106static bool has_msr_core_capabs;
 107static bool has_msr_vmx_vmfunc;
 108static bool has_msr_ucode_rev;
 109static bool has_msr_vmx_procbased_ctls2;
 110
 111static uint32_t has_architectural_pmu_version;
 112static uint32_t num_architectural_pmu_gp_counters;
 113static uint32_t num_architectural_pmu_fixed_counters;
 114
 115static int has_xsave;
 116static int has_xcrs;
 117static int has_pit_state2;
 118static int has_exception_payload;
 119
 120static bool has_msr_mcg_ext_ctl;
 121
 122static struct kvm_cpuid2 *cpuid_cache;
 123static struct kvm_msr_list *kvm_feature_msrs;
 124
 125int kvm_has_pit_state2(void)
 126{
 127    return has_pit_state2;
 128}
 129
 130bool kvm_has_smm(void)
 131{
 132    return kvm_check_extension(kvm_state, KVM_CAP_X86_SMM);
 133}
 134
 135bool kvm_has_adjust_clock_stable(void)
 136{
 137    int ret = kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK);
 138
 139    return (ret == KVM_CLOCK_TSC_STABLE);
 140}
 141
 142bool kvm_has_exception_payload(void)
 143{
 144    return has_exception_payload;
 145}
 146
 147bool kvm_allows_irq0_override(void)
 148{
 149    return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
 150}
 151
 152static bool kvm_x2apic_api_set_flags(uint64_t flags)
 153{
 154    KVMState *s = KVM_STATE(current_accel());
 155
 156    return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags);
 157}
 158
 159#define MEMORIZE(fn, _result) \
 160    ({ \
 161        static bool _memorized; \
 162        \
 163        if (_memorized) { \
 164            return _result; \
 165        } \
 166        _memorized = true; \
 167        _result = fn; \
 168    })
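/*
 * Usage sketch (illustrative only; 'cached' and expensive_probe() are
 * hypothetical):
 *
 *     static bool cached;
 *     bool probe_once(void)
 *     {
 *         return MEMORIZE(expensive_probe(), cached);
 *     }
 *
 * The first call evaluates expensive_probe() and stores the result in
 * 'cached'; later calls hit the early 'return _result;' inside the macro and
 * leave the enclosing function with the cached value, so the probe runs at
 * most once.
 */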
 169
 170static bool has_x2apic_api;
 171
 172bool kvm_has_x2apic_api(void)
 173{
 174    return has_x2apic_api;
 175}
 176
 177bool kvm_enable_x2apic(void)
 178{
 179    return MEMORIZE(
 180             kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS |
 181                                      KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK),
 182             has_x2apic_api);
 183}
 184
 185bool kvm_hv_vpindex_settable(void)
 186{
 187    return hv_vpindex_settable;
 188}
 189
 190static int kvm_get_tsc(CPUState *cs)
 191{
 192    X86CPU *cpu = X86_CPU(cs);
 193    CPUX86State *env = &cpu->env;
 194    struct {
 195        struct kvm_msrs info;
 196        struct kvm_msr_entry entries[1];
 197    } msr_data = {};
 198    int ret;
 199
 200    if (env->tsc_valid) {
 201        return 0;
 202    }
 203
 204    memset(&msr_data, 0, sizeof(msr_data));
 205    msr_data.info.nmsrs = 1;
 206    msr_data.entries[0].index = MSR_IA32_TSC;
 207    env->tsc_valid = !runstate_is_running();
 208
 209    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
 210    if (ret < 0) {
 211        return ret;
 212    }
 213
 214    assert(ret == 1);
 215    env->tsc = msr_data.entries[0].data;
 216    return 0;
 217}
 218
 219static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
 220{
 221    kvm_get_tsc(cpu);
 222}
 223
 224void kvm_synchronize_all_tsc(void)
 225{
 226    CPUState *cpu;
 227
 228    if (kvm_enabled()) {
 229        CPU_FOREACH(cpu) {
 230            run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
 231        }
 232    }
 233}
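/*
 * Note: run_on_cpu() runs do_kvm_synchronize_tsc() on each vCPU's own thread,
 * so the KVM_GET_MSRS ioctl in kvm_get_tsc() is always issued from the thread
 * that owns the vCPU file descriptor.  kvm_get_tsc() sets env->tsc_valid only
 * while the VM is not running, so repeated synchronization while the VM is
 * stopped reuses the value read the first time.
 */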
 234
 235static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
 236{
 237    struct kvm_cpuid2 *cpuid;
 238    int r, size;
 239
 240    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
 241    cpuid = g_malloc0(size);
 242    cpuid->nent = max;
 243    r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
 244    if (r == 0 && cpuid->nent >= max) {
 245        r = -E2BIG;
 246    }
 247    if (r < 0) {
 248        if (r == -E2BIG) {
 249            g_free(cpuid);
 250            return NULL;
 251        } else {
 252            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
 253                    strerror(-r));
 254            exit(1);
 255        }
 256    }
 257    return cpuid;
 258}
 259
 260/* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
 261 * for all entries.
 262 */
 263static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
 264{
 265    struct kvm_cpuid2 *cpuid;
 266    int max = 1;
 267
 268    if (cpuid_cache != NULL) {
 269        return cpuid_cache;
 270    }
 271    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
 272        max *= 2;
 273    }
 274    cpuid_cache = cpuid;
 275    return cpuid;
 276}
 277
 278static const struct kvm_para_features {
 279    int cap;
 280    int feature;
 281} para_features[] = {
 282    { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
 283    { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
 284    { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
 285    { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
 286};
 287
 288static int get_para_features(KVMState *s)
 289{
 290    int i, features = 0;
 291
 292    for (i = 0; i < ARRAY_SIZE(para_features); i++) {
 293        if (kvm_check_extension(s, para_features[i].cap)) {
 294            features |= (1 << para_features[i].feature);
 295        }
 296    }
 297
 298    return features;
 299}
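/*
 * Example: on a host that reports KVM_CAP_CLOCKSOURCE and KVM_CAP_ASYNC_PF
 * but none of the other capabilities listed in para_features[], the returned
 * bitmap is (1 << KVM_FEATURE_CLOCKSOURCE) | (1 << KVM_FEATURE_ASYNC_PF),
 * i.e. the same bit layout as the KVM_CPUID_FEATURES leaf's EAX, which is
 * why it can serve as the fallback in kvm_arch_get_supported_cpuid().
 */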
 300
 301static bool host_tsx_blacklisted(void)
 302{
  303    int family, model, stepping;
 304    char vendor[CPUID_VENDOR_SZ + 1];
 305
 306    host_vendor_fms(vendor, &family, &model, &stepping);
 307
 308    /* Check if we are running on a Haswell host known to have broken TSX */
 309    return !strcmp(vendor, CPUID_VENDOR_INTEL) &&
 310           (family == 6) &&
 311           ((model == 63 && stepping < 4) ||
 312            model == 60 || model == 69 || model == 70);
 313}
 314
 315/* Returns the value for a specific register on the cpuid entry
 316 */
 317static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
 318{
 319    uint32_t ret = 0;
 320    switch (reg) {
 321    case R_EAX:
 322        ret = entry->eax;
 323        break;
 324    case R_EBX:
 325        ret = entry->ebx;
 326        break;
 327    case R_ECX:
 328        ret = entry->ecx;
 329        break;
 330    case R_EDX:
 331        ret = entry->edx;
 332        break;
 333    }
 334    return ret;
 335}
 336
 337/* Find matching entry for function/index on kvm_cpuid2 struct
 338 */
 339static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
 340                                                 uint32_t function,
 341                                                 uint32_t index)
 342{
 343    int i;
 344    for (i = 0; i < cpuid->nent; ++i) {
 345        if (cpuid->entries[i].function == function &&
 346            cpuid->entries[i].index == index) {
 347            return &cpuid->entries[i];
 348        }
 349    }
 350    /* not found: */
 351    return NULL;
 352}
 353
 354uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
 355                                      uint32_t index, int reg)
 356{
 357    struct kvm_cpuid2 *cpuid;
 358    uint32_t ret = 0;
 359    uint32_t cpuid_1_edx;
 360    bool found = false;
 361
 362    cpuid = get_supported_cpuid(s);
 363
 364    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
 365    if (entry) {
 366        found = true;
 367        ret = cpuid_entry_get_reg(entry, reg);
 368    }
 369
 370    /* Fixups for the data returned by KVM, below */
 371
 372    if (function == 1 && reg == R_EDX) {
 373        /* KVM before 2.6.30 misreports the following features */
 374        ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
 375    } else if (function == 1 && reg == R_ECX) {
 376        /* We can set the hypervisor flag, even if KVM does not return it on
 377         * GET_SUPPORTED_CPUID
 378         */
 379        ret |= CPUID_EXT_HYPERVISOR;
 380        /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
 381         * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
 382         * and the irqchip is in the kernel.
 383         */
 384        if (kvm_irqchip_in_kernel() &&
 385                kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
 386            ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
 387        }
 388
 389        /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
 390         * without the in-kernel irqchip
 391         */
 392        if (!kvm_irqchip_in_kernel()) {
 393            ret &= ~CPUID_EXT_X2APIC;
 394        }
 395
 396        if (enable_cpu_pm) {
 397            int disable_exits = kvm_check_extension(s,
 398                                                    KVM_CAP_X86_DISABLE_EXITS);
 399
 400            if (disable_exits & KVM_X86_DISABLE_EXITS_MWAIT) {
 401                ret |= CPUID_EXT_MONITOR;
 402            }
 403        }
 404    } else if (function == 6 && reg == R_EAX) {
 405        ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
 406    } else if (function == 7 && index == 0 && reg == R_EBX) {
 407        if (host_tsx_blacklisted()) {
 408            ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
 409        }
 410    } else if (function == 7 && index == 0 && reg == R_ECX) {
 411        if (enable_cpu_pm) {
 412            ret |= CPUID_7_0_ECX_WAITPKG;
 413        } else {
 414            ret &= ~CPUID_7_0_ECX_WAITPKG;
 415        }
 416    } else if (function == 7 && index == 0 && reg == R_EDX) {
 417        /*
 418         * Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts.
 419         * We can detect the bug by checking if MSR_IA32_ARCH_CAPABILITIES is
 420         * returned by KVM_GET_MSR_INDEX_LIST.
 421         */
 422        if (!has_msr_arch_capabs) {
 423            ret &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES;
 424        }
 425    } else if (function == 0x80000001 && reg == R_ECX) {
 426        /*
 427         * It's safe to enable TOPOEXT even if it's not returned by
 428         * GET_SUPPORTED_CPUID.  Unconditionally enabling TOPOEXT here allows
 429         * us to keep CPU models including TOPOEXT runnable on older kernels.
 430         */
 431        ret |= CPUID_EXT3_TOPOEXT;
 432    } else if (function == 0x80000001 && reg == R_EDX) {
 433        /* On Intel, kvm returns cpuid according to the Intel spec,
 434         * so add missing bits according to the AMD spec:
 435         */
 436        cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
 437        ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
 438    } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) {
 439        /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
 440         * be enabled without the in-kernel irqchip
 441         */
 442        if (!kvm_irqchip_in_kernel()) {
 443            ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
 444        }
 445    } else if (function == KVM_CPUID_FEATURES && reg == R_EDX) {
 446        ret |= 1U << KVM_HINTS_REALTIME;
 447        found = 1;
 448    }
 449
 450    /* fallback for older kernels */
 451    if ((function == KVM_CPUID_FEATURES) && !found) {
 452        ret = get_para_features(s);
 453    }
 454
 455    return ret;
 456}
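/*
 * Usage sketch: callers query one register of one leaf at a time and mask the
 * bit they care about, e.g.
 *
 *     uint32_t ebx = kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX);
 *     bool invpcid = ebx & CPUID_7_0_EBX_INVPCID;
 *
 * and get back the host-supported bits with the fixups above already applied.
 */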
 457
 458uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index)
 459{
 460    struct {
 461        struct kvm_msrs info;
 462        struct kvm_msr_entry entries[1];
 463    } msr_data = {};
 464    uint64_t value;
 465    uint32_t ret, can_be_one, must_be_one;
 466
 467    if (kvm_feature_msrs == NULL) { /* Host doesn't support feature MSRs */
 468        return 0;
 469    }
 470
 471    /* Check if requested MSR is supported feature MSR */
 472    int i;
 473    for (i = 0; i < kvm_feature_msrs->nmsrs; i++)
 474        if (kvm_feature_msrs->indices[i] == index) {
 475            break;
 476        }
 477    if (i == kvm_feature_msrs->nmsrs) {
 478        return 0; /* if the feature MSR is not supported, simply return 0 */
 479    }
 480
 481    msr_data.info.nmsrs = 1;
 482    msr_data.entries[0].index = index;
 483
 484    ret = kvm_ioctl(s, KVM_GET_MSRS, &msr_data);
 485    if (ret != 1) {
 486        error_report("KVM get MSR (index=0x%x) feature failed, %s",
 487            index, strerror(-ret));
 488        exit(1);
 489    }
 490
 491    value = msr_data.entries[0].data;
 492    switch (index) {
 493    case MSR_IA32_VMX_PROCBASED_CTLS2:
 494        if (!has_msr_vmx_procbased_ctls2) {
 495            /* KVM forgot to add these bits for some time, do this ourselves. */
 496            if (kvm_arch_get_supported_cpuid(s, 0xD, 1, R_ECX) &
 497                CPUID_XSAVE_XSAVES) {
 498                value |= (uint64_t)VMX_SECONDARY_EXEC_XSAVES << 32;
 499            }
 500            if (kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX) &
 501                CPUID_EXT_RDRAND) {
 502                value |= (uint64_t)VMX_SECONDARY_EXEC_RDRAND_EXITING << 32;
 503            }
 504            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
 505                CPUID_7_0_EBX_INVPCID) {
 506                value |= (uint64_t)VMX_SECONDARY_EXEC_ENABLE_INVPCID << 32;
 507            }
 508            if (kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX) &
 509                CPUID_7_0_EBX_RDSEED) {
 510                value |= (uint64_t)VMX_SECONDARY_EXEC_RDSEED_EXITING << 32;
 511            }
 512            if (kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX) &
 513                CPUID_EXT2_RDTSCP) {
 514                value |= (uint64_t)VMX_SECONDARY_EXEC_RDTSCP << 32;
 515            }
 516        }
 517        /* fall through */
 518    case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
 519    case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
 520    case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
 521    case MSR_IA32_VMX_TRUE_EXIT_CTLS:
 522        /*
 523         * Return true for bits that can be one, but do not have to be one.
 524         * The SDM tells us which bits could have a "must be one" setting,
 525         * so we can do the opposite transformation in make_vmx_msr_value.
 526         */
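        /*
         * Worked example with made-up numbers: if such an MSR reads
         * 0x0000001600000012, then must_be_one = 0x12 (low half),
         * can_be_one = 0x16 (high half), and we report
         * 0x16 & ~0x12 = 0x04, i.e. only the bits the VMM may freely toggle.
         */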
 527        must_be_one = (uint32_t)value;
 528        can_be_one = (uint32_t)(value >> 32);
 529        return can_be_one & ~must_be_one;
 530
 531    default:
 532        return value;
 533    }
 534}
 535
 536
 537typedef struct HWPoisonPage {
 538    ram_addr_t ram_addr;
 539    QLIST_ENTRY(HWPoisonPage) list;
 540} HWPoisonPage;
 541
 542static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
 543    QLIST_HEAD_INITIALIZER(hwpoison_page_list);
 544
 545static void kvm_unpoison_all(void *param)
 546{
 547    HWPoisonPage *page, *next_page;
 548
 549    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
 550        QLIST_REMOVE(page, list);
 551        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
 552        g_free(page);
 553    }
 554}
 555
 556static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
 557{
 558    HWPoisonPage *page;
 559
 560    QLIST_FOREACH(page, &hwpoison_page_list, list) {
 561        if (page->ram_addr == ram_addr) {
 562            return;
 563        }
 564    }
 565    page = g_new(HWPoisonPage, 1);
 566    page->ram_addr = ram_addr;
 567    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
 568}
 569
 570static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
 571                                     int *max_banks)
 572{
 573    int r;
 574
 575    r = kvm_check_extension(s, KVM_CAP_MCE);
 576    if (r > 0) {
 577        *max_banks = r;
 578        return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
 579    }
 580    return -ENOSYS;
 581}
 582
 583static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
 584{
 585    CPUState *cs = CPU(cpu);
 586    CPUX86State *env = &cpu->env;
 587    uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
 588                      MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
 589    uint64_t mcg_status = MCG_STATUS_MCIP;
 590    int flags = 0;
 591
 592    if (code == BUS_MCEERR_AR) {
 593        status |= MCI_STATUS_AR | 0x134;
 594        mcg_status |= MCG_STATUS_EIPV;
 595    } else {
 596        status |= 0xc0;
 597        mcg_status |= MCG_STATUS_RIPV;
 598    }
 599
 600    flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0;
  601    /* We need to read the value of MSR_EXT_MCG_CTL that was set by the
 602     * guest kernel back into env->mcg_ext_ctl.
 603     */
 604    cpu_synchronize_state(cs);
 605    if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) {
 606        mcg_status |= MCG_STATUS_LMCE;
 607        flags = 0;
 608    }
 609
 610    cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
 611                       (MCM_ADDR_PHYS << 6) | 0xc, flags);
 612}
 613
 614static void hardware_memory_error(void *host_addr)
 615{
 616    error_report("QEMU got Hardware memory error at addr %p", host_addr);
 617    exit(1);
 618}
 619
 620void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
 621{
 622    X86CPU *cpu = X86_CPU(c);
 623    CPUX86State *env = &cpu->env;
 624    ram_addr_t ram_addr;
 625    hwaddr paddr;
 626
 627    /* If we get an action required MCE, it has been injected by KVM
 628     * while the VM was running.  An action optional MCE instead should
 629     * be coming from the main thread, which qemu_init_sigbus identifies
 630     * as the "early kill" thread.
 631     */
 632    assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);
 633
 634    if ((env->mcg_cap & MCG_SER_P) && addr) {
 635        ram_addr = qemu_ram_addr_from_host(addr);
 636        if (ram_addr != RAM_ADDR_INVALID &&
 637            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
 638            kvm_hwpoison_page_add(ram_addr);
 639            kvm_mce_inject(cpu, paddr, code);
 640
 641            /*
 642             * Use different logging severity based on error type.
 643             * If there is additional MCE reporting on the hypervisor, QEMU VA
 644             * could be another source to identify the PA and MCE details.
 645             */
 646            if (code == BUS_MCEERR_AR) {
 647                error_report("Guest MCE Memory Error at QEMU addr %p and "
 648                    "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
 649                    addr, paddr, "BUS_MCEERR_AR");
 650            } else {
 651                 warn_report("Guest MCE Memory Error at QEMU addr %p and "
 652                     "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
 653                     addr, paddr, "BUS_MCEERR_AO");
 654            }
 655
 656            return;
 657        }
 658
 659        if (code == BUS_MCEERR_AO) {
 660            warn_report("Hardware memory error at addr %p of type %s "
 661                "for memory used by QEMU itself instead of guest system!",
 662                 addr, "BUS_MCEERR_AO");
 663        }
 664    }
 665
 666    if (code == BUS_MCEERR_AR) {
 667        hardware_memory_error(addr);
 668    }
 669
 670    /* Hope we are lucky for AO MCE */
 671}
 672
 673static void kvm_reset_exception(CPUX86State *env)
 674{
 675    env->exception_nr = -1;
 676    env->exception_pending = 0;
 677    env->exception_injected = 0;
 678    env->exception_has_payload = false;
 679    env->exception_payload = 0;
 680}
 681
 682static void kvm_queue_exception(CPUX86State *env,
 683                                int32_t exception_nr,
 684                                uint8_t exception_has_payload,
 685                                uint64_t exception_payload)
 686{
 687    assert(env->exception_nr == -1);
 688    assert(!env->exception_pending);
 689    assert(!env->exception_injected);
 690    assert(!env->exception_has_payload);
 691
 692    env->exception_nr = exception_nr;
 693
 694    if (has_exception_payload) {
 695        env->exception_pending = 1;
 696
 697        env->exception_has_payload = exception_has_payload;
 698        env->exception_payload = exception_payload;
 699    } else {
 700        env->exception_injected = 1;
 701
 702        if (exception_nr == EXCP01_DB) {
 703            assert(exception_has_payload);
 704            env->dr[6] = exception_payload;
 705        } else if (exception_nr == EXCP0E_PAGE) {
 706            assert(exception_has_payload);
 707            env->cr[2] = exception_payload;
 708        } else {
 709            assert(!exception_has_payload);
 710        }
 711    }
 712}
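/*
 * Example: a page fault queued as
 *     kvm_queue_exception(env, EXCP0E_PAGE, true, fault_addr);
 * (fault_addr being whatever address the caller computed) is marked pending
 * with the address attached as payload when the kernel supports
 * KVM_CAP_EXCEPTION_PAYLOAD; on older kernels the same call writes the
 * address into env->cr[2] immediately and marks the exception as injected.
 */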
 713
 714static int kvm_inject_mce_oldstyle(X86CPU *cpu)
 715{
 716    CPUX86State *env = &cpu->env;
 717
 718    if (!kvm_has_vcpu_events() && env->exception_nr == EXCP12_MCHK) {
 719        unsigned int bank, bank_num = env->mcg_cap & 0xff;
 720        struct kvm_x86_mce mce;
 721
 722        kvm_reset_exception(env);
 723
 724        /*
 725         * There must be at least one bank in use if an MCE is pending.
 726         * Find it and use its values for the event injection.
 727         */
 728        for (bank = 0; bank < bank_num; bank++) {
 729            if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
 730                break;
 731            }
 732        }
 733        assert(bank < bank_num);
 734
 735        mce.bank = bank;
 736        mce.status = env->mce_banks[bank * 4 + 1];
 737        mce.mcg_status = env->mcg_status;
 738        mce.addr = env->mce_banks[bank * 4 + 2];
 739        mce.misc = env->mce_banks[bank * 4 + 3];
 740
 741        return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
 742    }
 743    return 0;
 744}
 745
 746static void cpu_update_state(void *opaque, int running, RunState state)
 747{
 748    CPUX86State *env = opaque;
 749
 750    if (running) {
 751        env->tsc_valid = false;
 752    }
 753}
 754
 755unsigned long kvm_arch_vcpu_id(CPUState *cs)
 756{
 757    X86CPU *cpu = X86_CPU(cs);
 758    return cpu->apic_id;
 759}
 760
 761#ifndef KVM_CPUID_SIGNATURE_NEXT
 762#define KVM_CPUID_SIGNATURE_NEXT                0x40000100
 763#endif
 764
 765static bool hyperv_enabled(X86CPU *cpu)
 766{
 767    CPUState *cs = CPU(cpu);
 768    return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
 769        ((cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY) ||
 770         cpu->hyperv_features || cpu->hyperv_passthrough);
 771}
 772
 773static int kvm_arch_set_tsc_khz(CPUState *cs)
 774{
 775    X86CPU *cpu = X86_CPU(cs);
 776    CPUX86State *env = &cpu->env;
 777    int r;
 778
 779    if (!env->tsc_khz) {
 780        return 0;
 781    }
 782
 783    r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL) ?
 784        kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
 785        -ENOTSUP;
 786    if (r < 0) {
 787        /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
 788         * TSC frequency doesn't match the one we want.
 789         */
 790        int cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
 791                       kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
 792                       -ENOTSUP;
 793        if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
 794            warn_report("TSC frequency mismatch between "
 795                        "VM (%" PRId64 " kHz) and host (%d kHz), "
 796                        "and TSC scaling unavailable",
 797                        env->tsc_khz, cur_freq);
 798            return r;
 799        }
 800    }
 801
 802    return 0;
 803}
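/*
 * Example (assuming the "tsc-frequency" CPU property): with
 * "-cpu host,tsc-frequency=2500000000" env->tsc_khz is 2500000; if the host
 * lacks KVM_CAP_TSC_CONTROL the KVM_SET_TSC_KHZ attempt above fails, and the
 * function only succeeds when KVM_GET_TSC_KHZ reports that the host TSC
 * already runs at 2500000 kHz.
 */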
 804
 805static bool tsc_is_stable_and_known(CPUX86State *env)
 806{
 807    if (!env->tsc_khz) {
 808        return false;
 809    }
 810    return (env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC)
 811        || env->user_tsc_khz;
 812}
 813
 814static struct {
 815    const char *desc;
 816    struct {
 817        uint32_t fw;
 818        uint32_t bits;
 819    } flags[2];
 820    uint64_t dependencies;
 821} kvm_hyperv_properties[] = {
 822    [HYPERV_FEAT_RELAXED] = {
 823        .desc = "relaxed timing (hv-relaxed)",
 824        .flags = {
 825            {.fw = FEAT_HYPERV_EAX,
 826             .bits = HV_HYPERCALL_AVAILABLE},
 827            {.fw = FEAT_HV_RECOMM_EAX,
 828             .bits = HV_RELAXED_TIMING_RECOMMENDED}
 829        }
 830    },
 831    [HYPERV_FEAT_VAPIC] = {
 832        .desc = "virtual APIC (hv-vapic)",
 833        .flags = {
 834            {.fw = FEAT_HYPERV_EAX,
 835             .bits = HV_HYPERCALL_AVAILABLE | HV_APIC_ACCESS_AVAILABLE},
 836            {.fw = FEAT_HV_RECOMM_EAX,
 837             .bits = HV_APIC_ACCESS_RECOMMENDED}
 838        }
 839    },
 840    [HYPERV_FEAT_TIME] = {
 841        .desc = "clocksources (hv-time)",
 842        .flags = {
 843            {.fw = FEAT_HYPERV_EAX,
 844             .bits = HV_HYPERCALL_AVAILABLE | HV_TIME_REF_COUNT_AVAILABLE |
 845             HV_REFERENCE_TSC_AVAILABLE}
 846        }
 847    },
 848    [HYPERV_FEAT_CRASH] = {
 849        .desc = "crash MSRs (hv-crash)",
 850        .flags = {
 851            {.fw = FEAT_HYPERV_EDX,
 852             .bits = HV_GUEST_CRASH_MSR_AVAILABLE}
 853        }
 854    },
 855    [HYPERV_FEAT_RESET] = {
 856        .desc = "reset MSR (hv-reset)",
 857        .flags = {
 858            {.fw = FEAT_HYPERV_EAX,
 859             .bits = HV_RESET_AVAILABLE}
 860        }
 861    },
 862    [HYPERV_FEAT_VPINDEX] = {
 863        .desc = "VP_INDEX MSR (hv-vpindex)",
 864        .flags = {
 865            {.fw = FEAT_HYPERV_EAX,
 866             .bits = HV_VP_INDEX_AVAILABLE}
 867        }
 868    },
 869    [HYPERV_FEAT_RUNTIME] = {
 870        .desc = "VP_RUNTIME MSR (hv-runtime)",
 871        .flags = {
 872            {.fw = FEAT_HYPERV_EAX,
 873             .bits = HV_VP_RUNTIME_AVAILABLE}
 874        }
 875    },
 876    [HYPERV_FEAT_SYNIC] = {
 877        .desc = "synthetic interrupt controller (hv-synic)",
 878        .flags = {
 879            {.fw = FEAT_HYPERV_EAX,
 880             .bits = HV_SYNIC_AVAILABLE}
 881        }
 882    },
 883    [HYPERV_FEAT_STIMER] = {
 884        .desc = "synthetic timers (hv-stimer)",
 885        .flags = {
 886            {.fw = FEAT_HYPERV_EAX,
 887             .bits = HV_SYNTIMERS_AVAILABLE}
 888        },
 889        .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_TIME)
 890    },
 891    [HYPERV_FEAT_FREQUENCIES] = {
 892        .desc = "frequency MSRs (hv-frequencies)",
 893        .flags = {
 894            {.fw = FEAT_HYPERV_EAX,
 895             .bits = HV_ACCESS_FREQUENCY_MSRS},
 896            {.fw = FEAT_HYPERV_EDX,
 897             .bits = HV_FREQUENCY_MSRS_AVAILABLE}
 898        }
 899    },
 900    [HYPERV_FEAT_REENLIGHTENMENT] = {
 901        .desc = "reenlightenment MSRs (hv-reenlightenment)",
 902        .flags = {
 903            {.fw = FEAT_HYPERV_EAX,
 904             .bits = HV_ACCESS_REENLIGHTENMENTS_CONTROL}
 905        }
 906    },
 907    [HYPERV_FEAT_TLBFLUSH] = {
 908        .desc = "paravirtualized TLB flush (hv-tlbflush)",
 909        .flags = {
 910            {.fw = FEAT_HV_RECOMM_EAX,
 911             .bits = HV_REMOTE_TLB_FLUSH_RECOMMENDED |
 912             HV_EX_PROCESSOR_MASKS_RECOMMENDED}
 913        },
 914        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
 915    },
 916    [HYPERV_FEAT_EVMCS] = {
 917        .desc = "enlightened VMCS (hv-evmcs)",
 918        .flags = {
 919            {.fw = FEAT_HV_RECOMM_EAX,
 920             .bits = HV_ENLIGHTENED_VMCS_RECOMMENDED}
 921        },
 922        .dependencies = BIT(HYPERV_FEAT_VAPIC)
 923    },
 924    [HYPERV_FEAT_IPI] = {
 925        .desc = "paravirtualized IPI (hv-ipi)",
 926        .flags = {
 927            {.fw = FEAT_HV_RECOMM_EAX,
 928             .bits = HV_CLUSTER_IPI_RECOMMENDED |
 929             HV_EX_PROCESSOR_MASKS_RECOMMENDED}
 930        },
 931        .dependencies = BIT(HYPERV_FEAT_VPINDEX)
 932    },
 933    [HYPERV_FEAT_STIMER_DIRECT] = {
 934        .desc = "direct mode synthetic timers (hv-stimer-direct)",
 935        .flags = {
 936            {.fw = FEAT_HYPERV_EDX,
 937             .bits = HV_STIMER_DIRECT_MODE_AVAILABLE}
 938        },
 939        .dependencies = BIT(HYPERV_FEAT_STIMER)
 940    },
 941};
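/*
 * Reading the table: each entry lists the Hyper-V CPUID bits that
 * hv_cpuid_check_and_set() requires from the host and then mirrors into
 * env->features[], plus any prerequisite hv-* features.  For example, the
 * HYPERV_FEAT_STIMER entry needs HV_SYNTIMERS_AVAILABLE in FEAT_HYPERV_EAX
 * and, via .dependencies, refuses to enable hv-stimer unless hv-synic and
 * hv-time are enabled as well.
 */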
 942
 943static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max)
 944{
 945    struct kvm_cpuid2 *cpuid;
 946    int r, size;
 947
 948    size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
 949    cpuid = g_malloc0(size);
 950    cpuid->nent = max;
 951
 952    r = kvm_vcpu_ioctl(cs, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
 953    if (r == 0 && cpuid->nent >= max) {
 954        r = -E2BIG;
 955    }
 956    if (r < 0) {
 957        if (r == -E2BIG) {
 958            g_free(cpuid);
 959            return NULL;
 960        } else {
 961            fprintf(stderr, "KVM_GET_SUPPORTED_HV_CPUID failed: %s\n",
 962                    strerror(-r));
 963            exit(1);
 964        }
 965    }
 966    return cpuid;
 967}
 968
 969/*
 970 * Run KVM_GET_SUPPORTED_HV_CPUID ioctl(), allocating a buffer large enough
 971 * for all entries.
 972 */
 973static struct kvm_cpuid2 *get_supported_hv_cpuid(CPUState *cs)
 974{
 975    struct kvm_cpuid2 *cpuid;
 976    int max = 7; /* 0x40000000..0x40000005, 0x4000000A */
 977
 978    /*
 979     * When the buffer is too small, KVM_GET_SUPPORTED_HV_CPUID fails with
 980     * -E2BIG, however, it doesn't report back the right size. Keep increasing
 981     * it and re-trying until we succeed.
 982     */
 983    while ((cpuid = try_get_hv_cpuid(cs, max)) == NULL) {
 984        max++;
 985    }
 986    return cpuid;
 987}
 988
 989/*
  990 * When KVM_GET_SUPPORTED_HV_CPUID is not supported, we fill the CPUID feature
  991 * leaves from KVM_CAP_HYPERV* capabilities and from the MSRs known to be present.
 992 */
 993static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs)
 994{
 995    X86CPU *cpu = X86_CPU(cs);
 996    struct kvm_cpuid2 *cpuid;
 997    struct kvm_cpuid_entry2 *entry_feat, *entry_recomm;
 998
 999    /* HV_CPUID_FEATURES, HV_CPUID_ENLIGHTMENT_INFO */
1000    cpuid = g_malloc0(sizeof(*cpuid) + 2 * sizeof(*cpuid->entries));
1001    cpuid->nent = 2;
1002
 1003    /* HV_CPUID_FEATURES */
1004    entry_feat = &cpuid->entries[0];
1005    entry_feat->function = HV_CPUID_FEATURES;
1006
1007    entry_recomm = &cpuid->entries[1];
1008    entry_recomm->function = HV_CPUID_ENLIGHTMENT_INFO;
1009    entry_recomm->ebx = cpu->hyperv_spinlock_attempts;
1010
1011    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0) {
1012        entry_feat->eax |= HV_HYPERCALL_AVAILABLE;
1013        entry_feat->eax |= HV_APIC_ACCESS_AVAILABLE;
1014        entry_feat->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
1015        entry_recomm->eax |= HV_RELAXED_TIMING_RECOMMENDED;
1016        entry_recomm->eax |= HV_APIC_ACCESS_RECOMMENDED;
1017    }
1018
1019    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
1020        entry_feat->eax |= HV_TIME_REF_COUNT_AVAILABLE;
1021        entry_feat->eax |= HV_REFERENCE_TSC_AVAILABLE;
1022    }
1023
1024    if (has_msr_hv_frequencies) {
1025        entry_feat->eax |= HV_ACCESS_FREQUENCY_MSRS;
1026        entry_feat->edx |= HV_FREQUENCY_MSRS_AVAILABLE;
1027    }
1028
1029    if (has_msr_hv_crash) {
1030        entry_feat->edx |= HV_GUEST_CRASH_MSR_AVAILABLE;
1031    }
1032
1033    if (has_msr_hv_reenlightenment) {
1034        entry_feat->eax |= HV_ACCESS_REENLIGHTENMENTS_CONTROL;
1035    }
1036
1037    if (has_msr_hv_reset) {
1038        entry_feat->eax |= HV_RESET_AVAILABLE;
1039    }
1040
1041    if (has_msr_hv_vpindex) {
1042        entry_feat->eax |= HV_VP_INDEX_AVAILABLE;
1043    }
1044
1045    if (has_msr_hv_runtime) {
1046        entry_feat->eax |= HV_VP_RUNTIME_AVAILABLE;
1047    }
1048
1049    if (has_msr_hv_synic) {
1050        unsigned int cap = cpu->hyperv_synic_kvm_only ?
1051            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;
1052
1053        if (kvm_check_extension(cs->kvm_state, cap) > 0) {
1054            entry_feat->eax |= HV_SYNIC_AVAILABLE;
1055        }
1056    }
1057
1058    if (has_msr_hv_stimer) {
1059        entry_feat->eax |= HV_SYNTIMERS_AVAILABLE;
1060    }
1061
1062    if (kvm_check_extension(cs->kvm_state,
1063                            KVM_CAP_HYPERV_TLBFLUSH) > 0) {
1064        entry_recomm->eax |= HV_REMOTE_TLB_FLUSH_RECOMMENDED;
1065        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
1066    }
1067
1068    if (kvm_check_extension(cs->kvm_state,
1069                            KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
1070        entry_recomm->eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
1071    }
1072
1073    if (kvm_check_extension(cs->kvm_state,
1074                            KVM_CAP_HYPERV_SEND_IPI) > 0) {
1075        entry_recomm->eax |= HV_CLUSTER_IPI_RECOMMENDED;
1076        entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
1077    }
1078
1079    return cpuid;
1080}
1081
1082static int hv_cpuid_get_fw(struct kvm_cpuid2 *cpuid, int fw, uint32_t *r)
1083{
1084    struct kvm_cpuid_entry2 *entry;
1085    uint32_t func;
1086    int reg;
1087
1088    switch (fw) {
1089    case FEAT_HYPERV_EAX:
1090        reg = R_EAX;
1091        func = HV_CPUID_FEATURES;
1092        break;
1093    case FEAT_HYPERV_EDX:
1094        reg = R_EDX;
1095        func = HV_CPUID_FEATURES;
1096        break;
1097    case FEAT_HV_RECOMM_EAX:
1098        reg = R_EAX;
1099        func = HV_CPUID_ENLIGHTMENT_INFO;
1100        break;
1101    default:
1102        return -EINVAL;
1103    }
1104
1105    entry = cpuid_find_entry(cpuid, func, 0);
1106    if (!entry) {
1107        return -ENOENT;
1108    }
1109
1110    switch (reg) {
1111    case R_EAX:
1112        *r = entry->eax;
1113        break;
1114    case R_EDX:
1115        *r = entry->edx;
1116        break;
1117    default:
1118        return -EINVAL;
1119    }
1120
1121    return 0;
1122}
1123
1124static int hv_cpuid_check_and_set(CPUState *cs, struct kvm_cpuid2 *cpuid,
1125                                  int feature)
1126{
1127    X86CPU *cpu = X86_CPU(cs);
1128    CPUX86State *env = &cpu->env;
1129    uint32_t r, fw, bits;
1130    uint64_t deps;
1131    int i, dep_feat;
1132
1133    if (!hyperv_feat_enabled(cpu, feature) && !cpu->hyperv_passthrough) {
1134        return 0;
1135    }
1136
1137    deps = kvm_hyperv_properties[feature].dependencies;
1138    while (deps) {
1139        dep_feat = ctz64(deps);
1140        if (!(hyperv_feat_enabled(cpu, dep_feat))) {
1141                fprintf(stderr,
1142                        "Hyper-V %s requires Hyper-V %s\n",
1143                        kvm_hyperv_properties[feature].desc,
1144                        kvm_hyperv_properties[dep_feat].desc);
1145                return 1;
1146        }
1147        deps &= ~(1ull << dep_feat);
1148    }
1149
1150    for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties[feature].flags); i++) {
1151        fw = kvm_hyperv_properties[feature].flags[i].fw;
1152        bits = kvm_hyperv_properties[feature].flags[i].bits;
1153
1154        if (!fw) {
1155            continue;
1156        }
1157
1158        if (hv_cpuid_get_fw(cpuid, fw, &r) || (r & bits) != bits) {
1159            if (hyperv_feat_enabled(cpu, feature)) {
1160                fprintf(stderr,
1161                        "Hyper-V %s is not supported by kernel\n",
1162                        kvm_hyperv_properties[feature].desc);
1163                return 1;
1164            } else {
1165                return 0;
1166            }
1167        }
1168
1169        env->features[fw] |= bits;
1170    }
1171
1172    if (cpu->hyperv_passthrough) {
1173        cpu->hyperv_features |= BIT(feature);
1174    }
1175
1176    return 0;
1177}
1178
1179/*
1180 * Fill in Hyper-V CPUIDs. Returns the number of entries filled in cpuid_ent in
1181 * case of success, errno < 0 in case of failure and 0 when no Hyper-V
 1182 * extensions are enabled.
1183 */
1184static int hyperv_handle_properties(CPUState *cs,
1185                                    struct kvm_cpuid_entry2 *cpuid_ent)
1186{
1187    X86CPU *cpu = X86_CPU(cs);
1188    CPUX86State *env = &cpu->env;
1189    struct kvm_cpuid2 *cpuid;
1190    struct kvm_cpuid_entry2 *c;
1191    uint32_t signature[3];
1192    uint32_t cpuid_i = 0;
1193    int r;
1194
1195    if (!hyperv_enabled(cpu))
1196        return 0;
1197
1198    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ||
1199        cpu->hyperv_passthrough) {
1200        uint16_t evmcs_version;
1201
1202        r = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 0,
1203                                (uintptr_t)&evmcs_version);
1204
1205        if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) && r) {
1206            fprintf(stderr, "Hyper-V %s is not supported by kernel\n",
1207                    kvm_hyperv_properties[HYPERV_FEAT_EVMCS].desc);
1208            return -ENOSYS;
1209        }
1210
1211        if (!r) {
1212            env->features[FEAT_HV_RECOMM_EAX] |=
1213                HV_ENLIGHTENED_VMCS_RECOMMENDED;
1214            env->features[FEAT_HV_NESTED_EAX] = evmcs_version;
1215        }
1216    }
1217
1218    if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_CPUID) > 0) {
1219        cpuid = get_supported_hv_cpuid(cs);
1220    } else {
1221        cpuid = get_supported_hv_cpuid_legacy(cs);
1222    }
1223
1224    if (cpu->hyperv_passthrough) {
1225        memcpy(cpuid_ent, &cpuid->entries[0],
1226               cpuid->nent * sizeof(cpuid->entries[0]));
1227
1228        c = cpuid_find_entry(cpuid, HV_CPUID_FEATURES, 0);
1229        if (c) {
1230            env->features[FEAT_HYPERV_EAX] = c->eax;
1231            env->features[FEAT_HYPERV_EBX] = c->ebx;
 1232            env->features[FEAT_HYPERV_EDX] = c->edx;
1233        }
1234        c = cpuid_find_entry(cpuid, HV_CPUID_ENLIGHTMENT_INFO, 0);
1235        if (c) {
1236            env->features[FEAT_HV_RECOMM_EAX] = c->eax;
1237
 1238            /* hv-spinlocks may have been overridden */
1239            if (cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY) {
1240                c->ebx = cpu->hyperv_spinlock_attempts;
1241            }
1242        }
1243        c = cpuid_find_entry(cpuid, HV_CPUID_NESTED_FEATURES, 0);
1244        if (c) {
1245            env->features[FEAT_HV_NESTED_EAX] = c->eax;
1246        }
1247    }
1248
1249    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) {
1250        env->features[FEAT_HV_RECOMM_EAX] |= HV_NO_NONARCH_CORESHARING;
1251    } else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) {
1252        c = cpuid_find_entry(cpuid, HV_CPUID_ENLIGHTMENT_INFO, 0);
1253        if (c) {
1254            env->features[FEAT_HV_RECOMM_EAX] |=
1255                c->eax & HV_NO_NONARCH_CORESHARING;
1256        }
1257    }
1258
1259    /* Features */
1260    r = hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RELAXED);
1261    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_VAPIC);
1262    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_TIME);
1263    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_CRASH);
1264    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RESET);
1265    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_VPINDEX);
1266    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RUNTIME);
1267    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_SYNIC);
1268    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_STIMER);
1269    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_FREQUENCIES);
1270    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_REENLIGHTENMENT);
1271    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_TLBFLUSH);
1272    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_EVMCS);
1273    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_IPI);
1274    r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_STIMER_DIRECT);
1275
1276    /* Additional dependencies not covered by kvm_hyperv_properties[] */
1277    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
1278        !cpu->hyperv_synic_kvm_only &&
1279        !hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)) {
1280        fprintf(stderr, "Hyper-V %s requires Hyper-V %s\n",
1281                kvm_hyperv_properties[HYPERV_FEAT_SYNIC].desc,
1282                kvm_hyperv_properties[HYPERV_FEAT_VPINDEX].desc);
1283        r |= 1;
1284    }
1285
1286    /* Not exposed by KVM but needed to make CPU hotplug in Windows work */
1287    env->features[FEAT_HYPERV_EDX] |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
1288
1289    if (r) {
1290        r = -ENOSYS;
1291        goto free;
1292    }
1293
1294    if (cpu->hyperv_passthrough) {
1295        /* We already copied all feature words from KVM as is */
1296        r = cpuid->nent;
1297        goto free;
1298    }
1299
1300    c = &cpuid_ent[cpuid_i++];
1301    c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
1302    if (!cpu->hyperv_vendor_id) {
1303        memcpy(signature, "Microsoft Hv", 12);
1304    } else {
1305        size_t len = strlen(cpu->hyperv_vendor_id);
1306
1307        if (len > 12) {
1308            error_report("hv-vendor-id truncated to 12 characters");
1309            len = 12;
1310        }
1311        memset(signature, 0, 12);
1312        memcpy(signature, cpu->hyperv_vendor_id, len);
1313    }
1314    c->eax = hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ?
1315        HV_CPUID_NESTED_FEATURES : HV_CPUID_IMPLEMENT_LIMITS;
1316    c->ebx = signature[0];
1317    c->ecx = signature[1];
1318    c->edx = signature[2];
1319
1320    c = &cpuid_ent[cpuid_i++];
1321    c->function = HV_CPUID_INTERFACE;
1322    memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
1323    c->eax = signature[0];
1324    c->ebx = 0;
1325    c->ecx = 0;
1326    c->edx = 0;
1327
1328    c = &cpuid_ent[cpuid_i++];
1329    c->function = HV_CPUID_VERSION;
1330    c->eax = 0x00001bbc;
1331    c->ebx = 0x00060001;
1332
1333    c = &cpuid_ent[cpuid_i++];
1334    c->function = HV_CPUID_FEATURES;
1335    c->eax = env->features[FEAT_HYPERV_EAX];
1336    c->ebx = env->features[FEAT_HYPERV_EBX];
1337    c->edx = env->features[FEAT_HYPERV_EDX];
1338
1339    c = &cpuid_ent[cpuid_i++];
1340    c->function = HV_CPUID_ENLIGHTMENT_INFO;
1341    c->eax = env->features[FEAT_HV_RECOMM_EAX];
1342    c->ebx = cpu->hyperv_spinlock_attempts;
1343
1344    c = &cpuid_ent[cpuid_i++];
1345    c->function = HV_CPUID_IMPLEMENT_LIMITS;
1346    c->eax = cpu->hv_max_vps;
1347    c->ebx = 0x40;
1348
1349    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
1350        __u32 function;
1351
1352        /* Create zeroed 0x40000006..0x40000009 leaves */
1353        for (function = HV_CPUID_IMPLEMENT_LIMITS + 1;
1354             function < HV_CPUID_NESTED_FEATURES; function++) {
1355            c = &cpuid_ent[cpuid_i++];
1356            c->function = function;
1357        }
1358
1359        c = &cpuid_ent[cpuid_i++];
1360        c->function = HV_CPUID_NESTED_FEATURES;
1361        c->eax = env->features[FEAT_HV_NESTED_EAX];
1362    }
1363    r = cpuid_i;
1364
1365free:
1366    g_free(cpuid);
1367
1368    return r;
1369}
1370
1371static Error *hv_passthrough_mig_blocker;
1372static Error *hv_no_nonarch_cs_mig_blocker;
1373
1374static int hyperv_init_vcpu(X86CPU *cpu)
1375{
1376    CPUState *cs = CPU(cpu);
1377    Error *local_err = NULL;
1378    int ret;
1379
1380    if (cpu->hyperv_passthrough && hv_passthrough_mig_blocker == NULL) {
1381        error_setg(&hv_passthrough_mig_blocker,
1382                   "'hv-passthrough' CPU flag prevents migration, use explicit"
1383                   " set of hv-* flags instead");
1384        ret = migrate_add_blocker(hv_passthrough_mig_blocker, &local_err);
1385        if (local_err) {
1386            error_report_err(local_err);
1387            error_free(hv_passthrough_mig_blocker);
1388            return ret;
1389        }
1390    }
1391
1392    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO &&
1393        hv_no_nonarch_cs_mig_blocker == NULL) {
1394        error_setg(&hv_no_nonarch_cs_mig_blocker,
 1395                   "'hv-no-nonarch-coresharing=auto' CPU flag prevents migration,"
1396                   " use explicit 'hv-no-nonarch-coresharing=on' instead (but"
1397                   " make sure SMT is disabled and/or that vCPUs are properly"
1398                   " pinned)");
1399        ret = migrate_add_blocker(hv_no_nonarch_cs_mig_blocker, &local_err);
1400        if (local_err) {
1401            error_report_err(local_err);
1402            error_free(hv_no_nonarch_cs_mig_blocker);
1403            return ret;
1404        }
1405    }
1406
1407    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) && !hv_vpindex_settable) {
1408        /*
1409         * the kernel doesn't support setting vp_index; assert that its value
1410         * is in sync
1411         */
1412        struct {
1413            struct kvm_msrs info;
1414            struct kvm_msr_entry entries[1];
1415        } msr_data = {
1416            .info.nmsrs = 1,
1417            .entries[0].index = HV_X64_MSR_VP_INDEX,
1418        };
1419
1420        ret = kvm_vcpu_ioctl(cs, KVM_GET_MSRS, &msr_data);
1421        if (ret < 0) {
1422            return ret;
1423        }
1424        assert(ret == 1);
1425
1426        if (msr_data.entries[0].data != hyperv_vp_index(CPU(cpu))) {
1427            error_report("kernel's vp_index != QEMU's vp_index");
1428            return -ENXIO;
1429        }
1430    }
1431
1432    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
1433        uint32_t synic_cap = cpu->hyperv_synic_kvm_only ?
1434            KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;
1435        ret = kvm_vcpu_enable_cap(cs, synic_cap, 0);
1436        if (ret < 0) {
1437            error_report("failed to turn on HyperV SynIC in KVM: %s",
1438                         strerror(-ret));
1439            return ret;
1440        }
1441
1442        if (!cpu->hyperv_synic_kvm_only) {
1443            ret = hyperv_x86_synic_add(cpu);
1444            if (ret < 0) {
1445                error_report("failed to create HyperV SynIC: %s",
1446                             strerror(-ret));
1447                return ret;
1448            }
1449        }
1450    }
1451
1452    return 0;
1453}
1454
1455static Error *invtsc_mig_blocker;
1456
1457#define KVM_MAX_CPUID_ENTRIES  100
1458
1459int kvm_arch_init_vcpu(CPUState *cs)
1460{
1461    struct {
1462        struct kvm_cpuid2 cpuid;
1463        struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
1464    } cpuid_data;
1465    /*
1466     * The kernel defines these structs with padding fields so there
1467     * should be no extra padding in our cpuid_data struct.
1468     */
1469    QEMU_BUILD_BUG_ON(sizeof(cpuid_data) !=
1470                      sizeof(struct kvm_cpuid2) +
1471                      sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES);
1472
1473    X86CPU *cpu = X86_CPU(cs);
1474    CPUX86State *env = &cpu->env;
1475    uint32_t limit, i, j, cpuid_i;
1476    uint32_t unused;
1477    struct kvm_cpuid_entry2 *c;
1478    uint32_t signature[3];
1479    int kvm_base = KVM_CPUID_SIGNATURE;
1480    int max_nested_state_len;
1481    int r;
1482    Error *local_err = NULL;
1483
1484    memset(&cpuid_data, 0, sizeof(cpuid_data));
1485
1486    cpuid_i = 0;
1487
1488    r = kvm_arch_set_tsc_khz(cs);
1489    if (r < 0) {
1490        return r;
1491    }
1492
 1493    /* The vCPU's TSC frequency is either specified by the user or, if not
 1494     * given, follows the value used by KVM. In the latter case we query it
 1495     * from KVM and record it in env->tsc_khz, so that the vCPU's TSC
 1496     * frequency can be migrated later via this field.
 1497     */
1498    if (!env->tsc_khz) {
1499        r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
1500            kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
1501            -ENOTSUP;
1502        if (r > 0) {
1503            env->tsc_khz = r;
1504        }
1505    }
1506
1507    /* Paravirtualization CPUIDs */
1508    r = hyperv_handle_properties(cs, cpuid_data.entries);
1509    if (r < 0) {
1510        return r;
1511    } else if (r > 0) {
1512        cpuid_i = r;
1513        kvm_base = KVM_CPUID_SIGNATURE_NEXT;
1514        has_msr_hv_hypercall = true;
1515    }
1516
1517    if (cpu->expose_kvm) {
1518        memcpy(signature, "KVMKVMKVM\0\0\0", 12);
1519        c = &cpuid_data.entries[cpuid_i++];
1520        c->function = KVM_CPUID_SIGNATURE | kvm_base;
1521        c->eax = KVM_CPUID_FEATURES | kvm_base;
1522        c->ebx = signature[0];
1523        c->ecx = signature[1];
1524        c->edx = signature[2];
1525
1526        c = &cpuid_data.entries[cpuid_i++];
1527        c->function = KVM_CPUID_FEATURES | kvm_base;
1528        c->eax = env->features[FEAT_KVM];
1529        c->edx = env->features[FEAT_KVM_HINTS];
1530    }
1531
1532    cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
1533
1534    for (i = 0; i <= limit; i++) {
1535        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1536            fprintf(stderr, "unsupported level value: 0x%x\n", limit);
1537            abort();
1538        }
1539        c = &cpuid_data.entries[cpuid_i++];
1540
1541        switch (i) {
1542        case 2: {
1543            /* Keep reading function 2 till all the input is received */
1544            int times;
1545
1546            c->function = i;
1547            c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
1548                       KVM_CPUID_FLAG_STATE_READ_NEXT;
1549            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
1550            times = c->eax & 0xff;
1551
1552            for (j = 1; j < times; ++j) {
1553                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1554                    fprintf(stderr, "cpuid_data is full, no space for "
 1555                            "cpuid(eax:2):eax & 0xff = 0x%x\n", times);
1556                    abort();
1557                }
1558                c = &cpuid_data.entries[cpuid_i++];
1559                c->function = i;
1560                c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
1561                cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
1562            }
1563            break;
1564        }
1565        case 0x1f:
1566            if (env->nr_dies < 2) {
1567                break;
1568            }
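            /* fall through */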
1569        case 4:
1570        case 0xb:
1571        case 0xd:
1572            for (j = 0; ; j++) {
1573                if (i == 0xd && j == 64) {
1574                    break;
1575                }
1576
1577                if (i == 0x1f && j == 64) {
1578                    break;
1579                }
1580
1581                c->function = i;
1582                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1583                c->index = j;
1584                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
1585
1586                if (i == 4 && c->eax == 0) {
1587                    break;
1588                }
1589                if (i == 0xb && !(c->ecx & 0xff00)) {
1590                    break;
1591                }
1592                if (i == 0x1f && !(c->ecx & 0xff00)) {
1593                    break;
1594                }
1595                if (i == 0xd && c->eax == 0) {
1596                    continue;
1597                }
1598                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1599                    fprintf(stderr, "cpuid_data is full, no space for "
1600                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
1601                    abort();
1602                }
1603                c = &cpuid_data.entries[cpuid_i++];
1604            }
1605            break;
1606        case 0x7:
1607        case 0x14: {
1608            uint32_t times;
1609
1610            c->function = i;
1611            c->index = 0;
1612            c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1613            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
1614            times = c->eax;
1615
1616            for (j = 1; j <= times; ++j) {
1617                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1618                    fprintf(stderr, "cpuid_data is full, no space for "
1619                                "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
1620                    abort();
1621                }
1622                c = &cpuid_data.entries[cpuid_i++];
1623                c->function = i;
1624                c->index = j;
1625                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1626                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
1627            }
1628            break;
1629        }
1630        default:
1631            c->function = i;
1632            c->flags = 0;
1633            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
1634            if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
1635                /*
1636                 * KVM already returns all zeroes if a CPUID entry is missing,
1637                 * so we can omit it and avoid hitting KVM's 80-entry limit.
1638                 */
1639                cpuid_i--;
1640            }
1641            break;
1642        }
1643    }
1644
1645    if (limit >= 0x0a) {
1646        uint32_t eax, edx;
1647
1648        cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);
1649
1650        has_architectural_pmu_version = eax & 0xff;
1651        if (has_architectural_pmu_version > 0) {
1652            num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;
1653
1654            /* Shouldn't be more than 32, since that's the number of bits
1655             * available in EBX to tell us _which_ counters are available.
1656             * Play it safe.
1657             */
1658            if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
1659                num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
1660            }
1661
1662            if (has_architectural_pmu_version > 1) {
1663                num_architectural_pmu_fixed_counters = edx & 0x1f;
1664
1665                if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
1666                    num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
1667                }
1668            }
1669        }
1670    }
1671
1672    cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
1673
1674    for (i = 0x80000000; i <= limit; i++) {
1675        if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1676            fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
1677            abort();
1678        }
1679        c = &cpuid_data.entries[cpuid_i++];
1680
1681        switch (i) {
1682        case 0x8000001d:
1683            /* Query for all AMD cache information leaves */
1684            for (j = 0; ; j++) {
1685                c->function = i;
1686                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
1687                c->index = j;
1688                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
1689
1690                if (c->eax == 0) {
1691                    break;
1692                }
1693                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1694                    fprintf(stderr, "cpuid_data is full, no space for "
1695                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
1696                    abort();
1697                }
1698                c = &cpuid_data.entries[cpuid_i++];
1699            }
1700            break;
1701        default:
1702            c->function = i;
1703            c->flags = 0;
1704            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
1705            if (!c->eax && !c->ebx && !c->ecx && !c->edx) {
1706                /*
1707                 * KVM already returns all zeroes if a CPUID entry is missing,
1708                 * so we can omit it and avoid hitting KVM's 80-entry limit.
1709                 */
1710                cpuid_i--;
1711            }
1712            break;
1713        }
1714    }
1715
1716    /* Call Centaur's CPUID instructions if they are supported. */
1717    if (env->cpuid_xlevel2 > 0) {
1718        cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
1719
1720        for (i = 0xC0000000; i <= limit; i++) {
1721            if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
1722                fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
1723                abort();
1724            }
1725            c = &cpuid_data.entries[cpuid_i++];
1726
1727            c->function = i;
1728            c->flags = 0;
1729            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
1730        }
1731    }
1732
1733    cpuid_data.cpuid.nent = cpuid_i;
1734
1735    if (((env->cpuid_version >> 8) & 0xF) >= 6
1736        && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
1737           (CPUID_MCE | CPUID_MCA)
1738        && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
1739        uint64_t mcg_cap, unsupported_caps;
1740        int banks;
1741        int ret;
1742
1743        ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
1744        if (ret < 0) {
1745            fprintf(stderr, "kvm_get_mce_cap_supported: %s\n", strerror(-ret));
1746            return ret;
1747        }
1748
1749        if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) {
1750            error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
1751                         (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks);
1752            return -ENOTSUP;
1753        }
1754
1755        unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK);
1756        if (unsupported_caps) {
1757            if (unsupported_caps & MCG_LMCE_P) {
1758                error_report("kvm: LMCE not supported");
1759                return -ENOTSUP;
1760            }
1761            warn_report("Unsupported MCG_CAP bits: 0x%" PRIx64,
1762                        unsupported_caps);
1763        }
1764
1765        env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
1766        ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap);
1767        if (ret < 0) {
1768        fprintf(stderr, "KVM_X86_SETUP_MCE: %s\n", strerror(-ret));
1769            return ret;
1770        }
1771    }
1772
1773    qemu_add_vm_change_state_handler(cpu_update_state, env);
1774
1775    c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
1776    if (c) {
1777        has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
1778                                  !!(c->ecx & CPUID_EXT_SMX);
1779    }
1780
1781    if (env->mcg_cap & MCG_LMCE_P) {
1782        has_msr_mcg_ext_ctl = has_msr_feature_control = true;
1783    }
1784
1785    if (!env->user_tsc_khz) {
1786        if ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) &&
1787            invtsc_mig_blocker == NULL) {
1788            error_setg(&invtsc_mig_blocker,
1789                       "State blocked by non-migratable CPU device"
1790                       " (invtsc flag)");
1791            r = migrate_add_blocker(invtsc_mig_blocker, &local_err);
1792            if (local_err) {
1793                error_report_err(local_err);
1794                error_free(invtsc_mig_blocker);
1795                return r;
1796            }
1797        }
1798    }
1799
1800    if (cpu->vmware_cpuid_freq
1801        /* Guests depend on 0x40000000 to detect this feature, so only expose
1802         * it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V) */
1803        && cpu->expose_kvm
1804        && kvm_base == KVM_CPUID_SIGNATURE
1805        /* TSC clock must be stable and known for this feature. */
1806        && tsc_is_stable_and_known(env)) {
1807
1808        c = &cpuid_data.entries[cpuid_i++];
1809        c->function = KVM_CPUID_SIGNATURE | 0x10;
1810        c->eax = env->tsc_khz;
1811        /* LAPIC resolution of 1ns (freq: 1GHz) is hardcoded in KVM's
1812         * APIC_BUS_CYCLE_NS */
1813        c->ebx = 1000000;
1814        c->ecx = c->edx = 0;
1815
1816        c = cpuid_find_entry(&cpuid_data.cpuid, kvm_base, 0);
1817        c->eax = MAX(c->eax, KVM_CPUID_SIGNATURE | 0x10);
1818    }
1819
1820    cpuid_data.cpuid.nent = cpuid_i;
1821
1822    cpuid_data.cpuid.padding = 0;
1823    r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
1824    if (r) {
1825        goto fail;
1826    }
1827
1828    if (has_xsave) {
1829        env->xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
1830        memset(env->xsave_buf, 0, sizeof(struct kvm_xsave));
1831    }
1832
1833    max_nested_state_len = kvm_max_nested_state_length();
1834    if (max_nested_state_len > 0) {
1835        assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data));
1836
1837        if (cpu_has_vmx(env)) {
1838            struct kvm_vmx_nested_state_hdr *vmx_hdr;
1839
1840            env->nested_state = g_malloc0(max_nested_state_len);
1841            env->nested_state->size = max_nested_state_len;
1842            env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;
1843
1844            vmx_hdr = &env->nested_state->hdr.vmx;
1845            vmx_hdr->vmxon_pa = -1ull;
1846            vmx_hdr->vmcs12_pa = -1ull;
1847        }
1848    }
1849
1850    cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);
1851
1852    if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
1853        has_msr_tsc_aux = false;
1854    }
1855
1856    kvm_init_msrs(cpu);
1857
1858    r = hyperv_init_vcpu(cpu);
1859    if (r) {
1860        goto fail;
1861    }
1862
1863    return 0;
1864
1865 fail:
1866    migrate_del_blocker(invtsc_mig_blocker);
1867
1868    return r;
1869}
1870
1871int kvm_arch_destroy_vcpu(CPUState *cs)
1872{
1873    X86CPU *cpu = X86_CPU(cs);
1874    CPUX86State *env = &cpu->env;
1875
1876    if (cpu->kvm_msr_buf) {
1877        g_free(cpu->kvm_msr_buf);
1878        cpu->kvm_msr_buf = NULL;
1879    }
1880
1881    if (env->nested_state) {
1882        g_free(env->nested_state);
1883        env->nested_state = NULL;
1884    }
1885
1886    return 0;
1887}
1888
1889void kvm_arch_reset_vcpu(X86CPU *cpu)
1890{
1891    CPUX86State *env = &cpu->env;
1892
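        /* XCR0 is architecturally reset to 1, i.e. only x87 state enabled. */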
1893    env->xcr0 = 1;
1894    if (kvm_irqchip_in_kernel()) {
1895        env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
1896                                          KVM_MP_STATE_UNINITIALIZED;
1897    } else {
1898        env->mp_state = KVM_MP_STATE_RUNNABLE;
1899    }
1900
1901    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
1902        int i;
1903        for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
1904            env->msr_hv_synic_sint[i] = HV_SINT_MASKED;
1905        }
1906
1907        hyperv_x86_synic_reset(cpu);
1908    }
1909    /* enabled by default */
1910    env->poll_control_msr = 1;
1911}
1912
1913void kvm_arch_do_init_vcpu(X86CPU *cpu)
1914{
1915    CPUX86State *env = &cpu->env;
1916
1917    /* APs get directly into wait-for-SIPI state.  */
1918    if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
1919        env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
1920    }
1921}
1922
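    /*
     * Query and cache the list of feature MSRs (MSRs whose values KVM itself
     * reports, such as the VMX capability MSRs).  The first
     * KVM_GET_MSR_FEATURE_INDEX_LIST call with nmsrs == 0 only learns the
     * required size, so -E2BIG is expected there.
     */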
1923static int kvm_get_supported_feature_msrs(KVMState *s)
1924{
1925    int ret = 0;
1926
1927    if (kvm_feature_msrs != NULL) {
1928        return 0;
1929    }
1930
1931    if (!kvm_check_extension(s, KVM_CAP_GET_MSR_FEATURES)) {
1932        return 0;
1933    }
1934
1935    struct kvm_msr_list msr_list;
1936
1937    msr_list.nmsrs = 0;
1938    ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, &msr_list);
1939    if (ret < 0 && ret != -E2BIG) {
1940        error_report("Failed to fetch KVM feature MSR list: %s",
1941                     strerror(-ret));
1942        return ret;
1943    }
1944
1945    assert(msr_list.nmsrs > 0);
1946    kvm_feature_msrs = (struct kvm_msr_list *)
1947        g_malloc0(sizeof(msr_list) +
1948                  msr_list.nmsrs * sizeof(msr_list.indices[0]));
1949
1950    kvm_feature_msrs->nmsrs = msr_list.nmsrs;
1951    ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, kvm_feature_msrs);
1952
1953    if (ret < 0) {
1954        error_report("Failed to fetch KVM feature MSR list: %s",
1955                     strerror(-ret));
1956        g_free(kvm_feature_msrs);
1957        kvm_feature_msrs = NULL;
1958        return ret;
1959    }
1960
1961    return 0;
1962}
1963
1964static int kvm_get_supported_msrs(KVMState *s)
1965{
1966    int ret = 0;
1967    struct kvm_msr_list msr_list, *kvm_msr_list;
1968
1969    /*
1970     *  Obtain MSR list from KVM.  These are the MSRs that we must
1971     *  save/restore.
1972     */
1973    msr_list.nmsrs = 0;
1974    ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
1975    if (ret < 0 && ret != -E2BIG) {
1976        return ret;
1977    }
1978    /*
1979     * Old kernel modules had a bug and could write beyond the provided
1980     * memory. Allocate at least a safe amount of 1K.
1981     */
1982    kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
1983                                          msr_list.nmsrs *
1984                                          sizeof(msr_list.indices[0])));
1985
1986    kvm_msr_list->nmsrs = msr_list.nmsrs;
1987    ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
1988    if (ret >= 0) {
1989        int i;
1990
1991        for (i = 0; i < kvm_msr_list->nmsrs; i++) {
1992            switch (kvm_msr_list->indices[i]) {
1993            case MSR_STAR:
1994                has_msr_star = true;
1995                break;
1996            case MSR_VM_HSAVE_PA:
1997                has_msr_hsave_pa = true;
1998                break;
1999            case MSR_TSC_AUX:
2000                has_msr_tsc_aux = true;
2001                break;
2002            case MSR_TSC_ADJUST:
2003                has_msr_tsc_adjust = true;
2004                break;
2005            case MSR_IA32_TSCDEADLINE:
2006                has_msr_tsc_deadline = true;
2007                break;
2008            case MSR_IA32_SMBASE:
2009                has_msr_smbase = true;
2010                break;
2011            case MSR_SMI_COUNT:
2012                has_msr_smi_count = true;
2013                break;
2014            case MSR_IA32_MISC_ENABLE:
2015                has_msr_misc_enable = true;
2016                break;
2017            case MSR_IA32_BNDCFGS:
2018                has_msr_bndcfgs = true;
2019                break;
2020            case MSR_IA32_XSS:
2021                has_msr_xss = true;
2022                break;
2023            case MSR_IA32_UMWAIT_CONTROL:
2024                has_msr_umwait = true;
2025                break;
2026            case HV_X64_MSR_CRASH_CTL:
2027                has_msr_hv_crash = true;
2028                break;
2029            case HV_X64_MSR_RESET:
2030                has_msr_hv_reset = true;
2031                break;
2032            case HV_X64_MSR_VP_INDEX:
2033                has_msr_hv_vpindex = true;
2034                break;
2035            case HV_X64_MSR_VP_RUNTIME:
2036                has_msr_hv_runtime = true;
2037                break;
2038            case HV_X64_MSR_SCONTROL:
2039                has_msr_hv_synic = true;
2040                break;
2041            case HV_X64_MSR_STIMER0_CONFIG:
2042                has_msr_hv_stimer = true;
2043                break;
2044            case HV_X64_MSR_TSC_FREQUENCY:
2045                has_msr_hv_frequencies = true;
2046                break;
2047            case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
2048                has_msr_hv_reenlightenment = true;
2049                break;
2050            case MSR_IA32_SPEC_CTRL:
2051                has_msr_spec_ctrl = true;
2052                break;
2053            case MSR_IA32_TSX_CTRL:
2054                has_msr_tsx_ctrl = true;
2055                break;
2056            case MSR_VIRT_SSBD:
2057                has_msr_virt_ssbd = true;
2058                break;
2059            case MSR_IA32_ARCH_CAPABILITIES:
2060                has_msr_arch_capabs = true;
2061                break;
2062            case MSR_IA32_CORE_CAPABILITY:
2063                has_msr_core_capabs = true;
2064                break;
2065            case MSR_IA32_VMX_VMFUNC:
2066                has_msr_vmx_vmfunc = true;
2067                break;
2068            case MSR_IA32_UCODE_REV:
2069                has_msr_ucode_rev = true;
2070                break;
2071            case MSR_IA32_VMX_PROCBASED_CTLS2:
2072                has_msr_vmx_procbased_ctls2 = true;
2073                break;
2074            }
2075        }
2076    }
2077
2078    g_free(kvm_msr_list);
2079
2080    return ret;
2081}
2082
2083static Notifier smram_machine_done;
2084static KVMMemoryListener smram_listener;
2085static AddressSpace smram_address_space;
2086static MemoryRegion smram_as_root;
2087static MemoryRegion smram_as_mem;
2088
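    /*
     * Build a separate address space for SMM: an alias of normal system
     * memory at low priority with the machine's SMRAM region overlaid on top,
     * registered with KVM as address space 1 (the SMM address space on x86).
     */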
2089static void register_smram_listener(Notifier *n, void *unused)
2090{
2091    MemoryRegion *smram =
2092        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2093
2094    /* Outer container... */
2095    memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull);
2096    memory_region_set_enabled(&smram_as_root, true);
2097
2098    /* ... with two regions inside: normal system memory with low
2099     * priority, and...
2100     */
2101    memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
2102                             get_system_memory(), 0, ~0ull);
2103    memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0);
2104    memory_region_set_enabled(&smram_as_mem, true);
2105
2106    if (smram) {
2107        /* ... SMRAM with higher priority */
2108        memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
2109        memory_region_set_enabled(smram, true);
2110    }
2111
2112    address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
2113    kvm_memory_listener_register(kvm_state, &smram_listener,
2114                                 &smram_address_space, 1);
2115}
2116
2117int kvm_arch_init(MachineState *ms, KVMState *s)
2118{
2119    uint64_t identity_base = 0xfffbc000;
2120    uint64_t shadow_mem;
2121    int ret;
2122    struct utsname utsname;
2123
2124    has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
2125    has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
2126    has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
2127
2128    hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX);
2129
2130    has_exception_payload = kvm_check_extension(s, KVM_CAP_EXCEPTION_PAYLOAD);
2131    if (has_exception_payload) {
2132        ret = kvm_vm_enable_cap(s, KVM_CAP_EXCEPTION_PAYLOAD, 0, true);
2133        if (ret < 0) {
2134            error_report("kvm: Failed to enable exception payload cap: %s",
2135                         strerror(-ret));
2136            return ret;
2137        }
2138    }
2139
2140    ret = kvm_get_supported_msrs(s);
2141    if (ret < 0) {
2142        return ret;
2143    }
2144
2145    kvm_get_supported_feature_msrs(s);
2146
2147    uname(&utsname);
2148    lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;
2149
2150    /*
2151     * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
2152     * In order to use vm86 mode, an EPT identity map and a TSS are needed.
2153     * Since these must be part of guest physical memory, we need to allocate
2154     * them, both by setting their start addresses in the kernel and by
2155     * creating a corresponding e820 entry. We need 4 pages before the BIOS.
2156     *
2157     * Older KVM versions may not support setting the identity map base. In
2158     * that case we need to stick with the default, i.e. a 256K maximum BIOS
2159     * size.
2160     */
2161    if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
2162        /* Allows up to 16M BIOSes. */
2163        identity_base = 0xfeffc000;
2164
2165        ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
2166        if (ret < 0) {
2167            return ret;
2168        }
2169    }
2170
2171    /* Set TSS base one page after EPT identity map. */
2172    ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
2173    if (ret < 0) {
2174        return ret;
2175    }
2176
2177    /* Tell fw_cfg to notify the BIOS to reserve the range. */
2178    ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
2179    if (ret < 0) {
2180        fprintf(stderr, "e820_add_entry() table is full\n");
2181        return ret;
2182    }
2183    qemu_register_reset(kvm_unpoison_all, NULL);
2184
2185    shadow_mem = object_property_get_int(OBJECT(s), "kvm-shadow-mem", &error_abort);
2186    if (shadow_mem != -1) {
2187        shadow_mem /= 4096;
2188        ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
2189        if (ret < 0) {
2190            return ret;
2191        }
2192    }
2193
2194    if (kvm_check_extension(s, KVM_CAP_X86_SMM) &&
2195        object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE) &&
2196        x86_machine_is_smm_enabled(X86_MACHINE(ms))) {
2197        smram_machine_done.notify = register_smram_listener;
2198        qemu_add_machine_init_done_notifier(&smram_machine_done);
2199    }
2200
2201    if (enable_cpu_pm) {
2202        int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS);
2203        int ret;
2204
2205/* Workaround for a kernel header with a typo. TODO: fix header and drop. */
2206#if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT)
2207#define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL
2208#endif
2209        if (disable_exits) {
2210            disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT |
2211                              KVM_X86_DISABLE_EXITS_HLT |
2212                              KVM_X86_DISABLE_EXITS_PAUSE |
2213                              KVM_X86_DISABLE_EXITS_CSTATE);
2214        }
2215
2216        ret = kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0,
2217                                disable_exits);
2218        if (ret < 0) {
2219            error_report("kvm: guest stopping CPU not supported: %s",
2220                         strerror(-ret));
2221        }
2222    }
2223
2224    return 0;
2225}
2226
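    /*
     * In virtual-8086 mode the segment attributes have fixed values
     * (type 3, DPL 3, 16-bit), so synthesize them here instead of translating
     * env->segs[].flags.
     */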
2227static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
2228{
2229    lhs->selector = rhs->selector;
2230    lhs->base = rhs->base;
2231    lhs->limit = rhs->limit;
2232    lhs->type = 3;
2233    lhs->present = 1;
2234    lhs->dpl = 3;
2235    lhs->db = 0;
2236    lhs->s = 1;
2237    lhs->l = 0;
2238    lhs->g = 0;
2239    lhs->avl = 0;
2240    lhs->unusable = 0;
2241}
2242
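    /* Translate QEMU's packed descriptor flags into struct kvm_segment. */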
2243static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
2244{
2245    unsigned flags = rhs->flags;
2246    lhs->selector = rhs->selector;
2247    lhs->base = rhs->base;
2248    lhs->limit = rhs->limit;
2249    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
2250    lhs->present = (flags & DESC_P_MASK) != 0;
2251    lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
2252    lhs->db = (flags >> DESC_B_SHIFT) & 1;
2253    lhs->s = (flags & DESC_S_MASK) != 0;
2254    lhs->l = (flags >> DESC_L_SHIFT) & 1;
2255    lhs->g = (flags & DESC_G_MASK) != 0;
2256    lhs->avl = (flags & DESC_AVL_MASK) != 0;
2257    lhs->unusable = !lhs->present;
2258    lhs->padding = 0;
2259}
2260
2261static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
2262{
2263    lhs->selector = rhs->selector;
2264    lhs->base = rhs->base;
2265    lhs->limit = rhs->limit;
2266    lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
2267                 ((rhs->present && !rhs->unusable) * DESC_P_MASK) |
2268                 (rhs->dpl << DESC_DPL_SHIFT) |
2269                 (rhs->db << DESC_B_SHIFT) |
2270                 (rhs->s * DESC_S_MASK) |
2271                 (rhs->l << DESC_L_SHIFT) |
2272                 (rhs->g * DESC_G_MASK) |
2273                 (rhs->avl * DESC_AVL_MASK);
2274}
2275
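    /*
     * Copy a single register between struct kvm_regs and the QEMU CPU state;
     * 'set' selects the direction (QEMU -> KVM when non-zero).
     */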
2276static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
2277{
2278    if (set) {
2279        *kvm_reg = *qemu_reg;
2280    } else {
2281        *qemu_reg = *kvm_reg;
2282    }
2283}
2284
2285static int kvm_getput_regs(X86CPU *cpu, int set)
2286{
2287    CPUX86State *env = &cpu->env;
2288    struct kvm_regs regs;
2289    int ret = 0;
2290
2291    if (!set) {
2292        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
2293        if (ret < 0) {
2294            return ret;
2295        }
2296    }
2297
2298    kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
2299    kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
2300    kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
2301    kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
2302    kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
2303    kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
2304    kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
2305    kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
2306#ifdef TARGET_X86_64
2307    kvm_getput_reg(&regs.r8, &env->regs[8], set);
2308    kvm_getput_reg(&regs.r9, &env->regs[9], set);
2309    kvm_getput_reg(&regs.r10, &env->regs[10], set);
2310    kvm_getput_reg(&regs.r11, &env->regs[11], set);
2311    kvm_getput_reg(&regs.r12, &env->regs[12], set);
2312    kvm_getput_reg(&regs.r13, &env->regs[13], set);
2313    kvm_getput_reg(&regs.r14, &env->regs[14], set);
2314    kvm_getput_reg(&regs.r15, &env->regs[15], set);
2315#endif
2316
2317    kvm_getput_reg(&regs.rflags, &env->eflags, set);
2318    kvm_getput_reg(&regs.rip, &env->eip, set);
2319
2320    if (set) {
2321        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
2322    }
2323
2324    return ret;
2325}
2326
2327static int kvm_put_fpu(X86CPU *cpu)
2328{
2329    CPUX86State *env = &cpu->env;
2330    struct kvm_fpu fpu;
2331    int i;
2332
2333    memset(&fpu, 0, sizeof fpu);
2334    fpu.fsw = env->fpus & ~(7 << 11);
2335    fpu.fsw |= (env->fpstt & 7) << 11;
2336    fpu.fcw = env->fpuc;
2337    fpu.last_opcode = env->fpop;
2338    fpu.last_ip = env->fpip;
2339    fpu.last_dp = env->fpdp;
2340    for (i = 0; i < 8; ++i) {
2341        fpu.ftwx |= (!env->fptags[i]) << i;
2342    }
2343    memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
2344    for (i = 0; i < CPU_NB_REGS; i++) {
2345        stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0));
2346        stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1));
2347    }
2348    fpu.mxcsr = env->mxcsr;
2349
2350    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
2351}
2352
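    /*
     * Offsets into kvm_xsave.region[], counted in 32-bit words (region[0] is
     * a __u32).  They mirror the layout of X86XSaveArea; the ASSERT_OFFSET
     * checks below verify that the two layouts stay in sync.
     */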
2353#define XSAVE_FCW_FSW     0
2354#define XSAVE_FTW_FOP     1
2355#define XSAVE_CWD_RIP     2
2356#define XSAVE_CWD_RDP     4
2357#define XSAVE_MXCSR       6
2358#define XSAVE_ST_SPACE    8
2359#define XSAVE_XMM_SPACE   40
2360#define XSAVE_XSTATE_BV   128
2361#define XSAVE_YMMH_SPACE  144
2362#define XSAVE_BNDREGS     240
2363#define XSAVE_BNDCSR      256
2364#define XSAVE_OPMASK      272
2365#define XSAVE_ZMM_Hi256   288
2366#define XSAVE_Hi16_ZMM    416
2367#define XSAVE_PKRU        672
2368
2369#define XSAVE_BYTE_OFFSET(word_offset) \
2370    ((word_offset) * sizeof_field(struct kvm_xsave, region[0]))
2371
2372#define ASSERT_OFFSET(word_offset, field) \
2373    QEMU_BUILD_BUG_ON(XSAVE_BYTE_OFFSET(word_offset) != \
2374                      offsetof(X86XSaveArea, field))
2375
2376ASSERT_OFFSET(XSAVE_FCW_FSW, legacy.fcw);
2377ASSERT_OFFSET(XSAVE_FTW_FOP, legacy.ftw);
2378ASSERT_OFFSET(XSAVE_CWD_RIP, legacy.fpip);
2379ASSERT_OFFSET(XSAVE_CWD_RDP, legacy.fpdp);
2380ASSERT_OFFSET(XSAVE_MXCSR, legacy.mxcsr);
2381ASSERT_OFFSET(XSAVE_ST_SPACE, legacy.fpregs);
2382ASSERT_OFFSET(XSAVE_XMM_SPACE, legacy.xmm_regs);
2383ASSERT_OFFSET(XSAVE_XSTATE_BV, header.xstate_bv);
2384ASSERT_OFFSET(XSAVE_YMMH_SPACE, avx_state);
2385ASSERT_OFFSET(XSAVE_BNDREGS, bndreg_state);
2386ASSERT_OFFSET(XSAVE_BNDCSR, bndcsr_state);
2387ASSERT_OFFSET(XSAVE_OPMASK, opmask_state);
2388ASSERT_OFFSET(XSAVE_ZMM_Hi256, zmm_hi256_state);
2389ASSERT_OFFSET(XSAVE_Hi16_ZMM, hi16_zmm_state);
2390ASSERT_OFFSET(XSAVE_PKRU, pkru_state);
2391
2392static int kvm_put_xsave(X86CPU *cpu)
2393{
2394    CPUX86State *env = &cpu->env;
2395    X86XSaveArea *xsave = env->xsave_buf;
2396
2397    if (!has_xsave) {
2398        return kvm_put_fpu(cpu);
2399    }
2400    x86_cpu_xsave_all_areas(cpu, xsave);
2401
2402    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
2403}
2404
2405static int kvm_put_xcrs(X86CPU *cpu)
2406{
2407    CPUX86State *env = &cpu->env;
2408    struct kvm_xcrs xcrs = {};
2409
2410    if (!has_xcrs) {
2411        return 0;
2412    }
2413
2414    xcrs.nr_xcrs = 1;
2415    xcrs.flags = 0;
2416    xcrs.xcrs[0].xcr = 0;
2417    xcrs.xcrs[0].value = env->xcr0;
2418    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
2419}
2420
2421static int kvm_put_sregs(X86CPU *cpu)
2422{
2423    CPUX86State *env = &cpu->env;
2424    struct kvm_sregs sregs;
2425
2426    memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
2427    if (env->interrupt_injected >= 0) {
2428        sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
2429                (uint64_t)1 << (env->interrupt_injected % 64);
2430    }
2431
2432    if ((env->eflags & VM_MASK)) {
2433        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
2434        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
2435        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
2436        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
2437        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
2438        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
2439    } else {
2440        set_seg(&sregs.cs, &env->segs[R_CS]);
2441        set_seg(&sregs.ds, &env->segs[R_DS]);
2442        set_seg(&sregs.es, &env->segs[R_ES]);
2443        set_seg(&sregs.fs, &env->segs[R_FS]);
2444        set_seg(&sregs.gs, &env->segs[R_GS]);
2445        set_seg(&sregs.ss, &env->segs[R_SS]);
2446    }
2447
2448    set_seg(&sregs.tr, &env->tr);
2449    set_seg(&sregs.ldt, &env->ldt);
2450
2451    sregs.idt.limit = env->idt.limit;
2452    sregs.idt.base = env->idt.base;
2453    memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
2454    sregs.gdt.limit = env->gdt.limit;
2455    sregs.gdt.base = env->gdt.base;
2456    memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);
2457
2458    sregs.cr0 = env->cr[0];
2459    sregs.cr2 = env->cr[2];
2460    sregs.cr3 = env->cr[3];
2461    sregs.cr4 = env->cr[4];
2462
2463    sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
2464    sregs.apic_base = cpu_get_apic_base(cpu->apic_state);
2465
2466    sregs.efer = env->efer;
2467
2468    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
2469}
2470
2471static void kvm_msr_buf_reset(X86CPU *cpu)
2472{
2473    memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
2474}
2475
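    /*
     * Append one index/value pair to the per-vCPU MSR buffer used for batched
     * KVM_SET_MSRS/KVM_GET_MSRS calls; the assertion guarantees the fixed
     * MSR_BUF_SIZE buffer is never overrun.
     */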
2476static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
2477{
2478    struct kvm_msrs *msrs = cpu->kvm_msr_buf;
2479    void *limit = ((void *)msrs) + MSR_BUF_SIZE;
2480    struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs];
2481
2482    assert((void *)(entry + 1) <= limit);
2483
2484    entry->index = index;
2485    entry->reserved = 0;
2486    entry->data = value;
2487    msrs->nmsrs++;
2488}
2489
2490static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value)
2491{
2492    kvm_msr_buf_reset(cpu);
2493    kvm_msr_entry_add(cpu, index, value);
2494
2495    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
2496}
2497
2498void kvm_put_apicbase(X86CPU *cpu, uint64_t value)
2499{
2500    int ret;
2501
2502    ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value);
2503    assert(ret == 1);
2504}
2505
2506static int kvm_put_tscdeadline_msr(X86CPU *cpu)
2507{
2508    CPUX86State *env = &cpu->env;
2509    int ret;
2510
2511    if (!has_msr_tsc_deadline) {
2512        return 0;
2513    }
2514
2515    ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
2516    if (ret < 0) {
2517        return ret;
2518    }
2519
2520    assert(ret == 1);
2521    return 0;
2522}
2523
2524/*
2525 * Provide a separate write service for the feature control MSR in order to
2526 * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
2527 * before writing any other state because forcibly leaving nested mode
2528 * invalidates the VCPU state.
2529 */
2530static int kvm_put_msr_feature_control(X86CPU *cpu)
2531{
2532    int ret;
2533
2534    if (!has_msr_feature_control) {
2535        return 0;
2536    }
2537
2538    ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL,
2539                          cpu->env.msr_ia32_feature_control);
2540    if (ret < 0) {
2541        return ret;
2542    }
2543
2544    assert(ret == 1);
2545    return 0;
2546}
2547
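    /*
     * Compute the value of a "true" VMX capability MSR from the guest's
     * feature word: the low 32 bits report which controls must be 1, the
     * high 32 bits which controls may be 1.  'default1' is the set of
     * controls the architecture defines as default-1.
     *
     * For example, MSR_IA32_VMX_TRUE_PINBASED_CTLS with features == 0 yields
     * 0x0000001600000016: only the default-1 bits may be set, and they must be.
     */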
2548static uint64_t make_vmx_msr_value(uint32_t index, uint32_t features)
2549{
2550    uint32_t default1, can_be_one, can_be_zero;
2551    uint32_t must_be_one;
2552
2553    switch (index) {
2554    case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
2555        default1 = 0x00000016;
2556        break;
2557    case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
2558        default1 = 0x0401e172;
2559        break;
2560    case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
2561        default1 = 0x000011ff;
2562        break;
2563    case MSR_IA32_VMX_TRUE_EXIT_CTLS:
2564        default1 = 0x00036dff;
2565        break;
2566    case MSR_IA32_VMX_PROCBASED_CTLS2:
2567        default1 = 0;
2568        break;
2569    default:
2570        abort();
2571    }
2572
2573    /* If a feature bit is set, the control can be either set or clear.
2574     * Otherwise the value is limited to either 0 or 1 by default1.
2575     */
2576    can_be_one = features | default1;
2577    can_be_zero = features | ~default1;
2578    must_be_one = ~can_be_zero;
2579
2580    /*
2581     * Bit 0:31 -> 0 if the control bit can be zero (i.e. 1 if it must be one).
2582     * Bit 32:63 -> 1 if the control bit can be one.
2583     */
2584    return must_be_one | (((uint64_t)can_be_one) << 32);
2585}
2586
2587#define VMCS12_MAX_FIELD_INDEX (0x17)
2588
2589static void kvm_msr_entry_add_vmx(X86CPU *cpu, FeatureWordArray f)
2590{
2591    uint64_t kvm_vmx_basic =
2592        kvm_arch_get_supported_msr_feature(kvm_state,
2593                                           MSR_IA32_VMX_BASIC);
2594
2595    if (!kvm_vmx_basic) {
2596        /* If the kernel doesn't support VMX feature (kvm_intel.nested=0),
2597         * then kvm_vmx_basic will be 0 and KVM_SET_MSR will fail.
2598         */
2599        return;
2600    }
2601
2602    uint64_t kvm_vmx_misc =
2603        kvm_arch_get_supported_msr_feature(kvm_state,
2604                                           MSR_IA32_VMX_MISC);
2605    uint64_t kvm_vmx_ept_vpid =
2606        kvm_arch_get_supported_msr_feature(kvm_state,
2607                                           MSR_IA32_VMX_EPT_VPID_CAP);
2608
2609    /*
2610     * If the guest is 64-bit, a value of 1 is allowed for the host address
2611     * space size vmexit control.
2612     */
2613    uint64_t fixed_vmx_exit = f[FEAT_8000_0001_EDX] & CPUID_EXT2_LM
2614        ? (uint64_t)VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE << 32 : 0;
2615
2616    /*
2617     * Bits 0-30, 32-44 and 50-53 come from the host.  KVM should
2618     * not change them for backwards compatibility.
2619     */
2620    uint64_t fixed_vmx_basic = kvm_vmx_basic &
2621        (MSR_VMX_BASIC_VMCS_REVISION_MASK |
2622         MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK |
2623         MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK);
2624
2625    /*
2626     * Same for bits 0-4 and 25-27.  Bits 16-24 (CR3 target count) can
2627     * change in the future but are always zero for now, clear them to be
2628     * future proof.  Bits 32-63 in theory could change, though KVM does
2629     * not support dual-monitor treatment and probably never will; mask
2630     * them out as well.
2631     */
2632    uint64_t fixed_vmx_misc = kvm_vmx_misc &
2633        (MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK |
2634         MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK);
2635
2636    /*
2637     * EPT memory types should not change either, so we do not bother
2638     * adding features for them.
2639     */
2640    uint64_t fixed_vmx_ept_mask =
2641            (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_ENABLE_EPT ?
2642             MSR_VMX_EPT_UC | MSR_VMX_EPT_WB : 0);
2643    uint64_t fixed_vmx_ept_vpid = kvm_vmx_ept_vpid & fixed_vmx_ept_mask;
2644
2645    kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
2646                      make_vmx_msr_value(MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
2647                                         f[FEAT_VMX_PROCBASED_CTLS]));
2648    kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_PINBASED_CTLS,
2649                      make_vmx_msr_value(MSR_IA32_VMX_TRUE_PINBASED_CTLS,
2650                                         f[FEAT_VMX_PINBASED_CTLS]));
2651    kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_EXIT_CTLS,
2652                      make_vmx_msr_value(MSR_IA32_VMX_TRUE_EXIT_CTLS,
2653                                         f[FEAT_VMX_EXIT_CTLS]) | fixed_vmx_exit);
2654    kvm_msr_entry_add(cpu, MSR_IA32_VMX_TRUE_ENTRY_CTLS,
2655                      make_vmx_msr_value(MSR_IA32_VMX_TRUE_ENTRY_CTLS,
2656                                         f[FEAT_VMX_ENTRY_CTLS]));
2657    kvm_msr_entry_add(cpu, MSR_IA32_VMX_PROCBASED_CTLS2,
2658                      make_vmx_msr_value(MSR_IA32_VMX_PROCBASED_CTLS2,
2659                                         f[FEAT_VMX_SECONDARY_CTLS]));
2660    kvm_msr_entry_add(cpu, MSR_IA32_VMX_EPT_VPID_CAP,
2661                      f[FEAT_VMX_EPT_VPID_CAPS] | fixed_vmx_ept_vpid);
2662    kvm_msr_entry_add(cpu, MSR_IA32_VMX_BASIC,
2663                      f[FEAT_VMX_BASIC] | fixed_vmx_basic);
2664    kvm_msr_entry_add(cpu, MSR_IA32_VMX_MISC,
2665                      f[FEAT_VMX_MISC] | fixed_vmx_misc);
2666    if (has_msr_vmx_vmfunc) {
2667        kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMFUNC, f[FEAT_VMX_VMFUNC]);
2668    }
2669
2670    /*
2671     * Just to be safe, write these with constant values.  The CRn_FIXED1
2672     * MSRs are generated by KVM based on the vCPU's CPUID.
2673     */
2674    kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR0_FIXED0,
2675                      CR0_PE_MASK | CR0_PG_MASK | CR0_NE_MASK);
2676    kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR4_FIXED0,
2677                      CR4_VMXE_MASK);
2678    kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM,
2679                      VMCS12_MAX_FIELD_INDEX << 1);
2680}
2681
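    /*
     * Flush the accumulated MSR buffer with one KVM_SET_MSRS call.  KVM
     * returns the number of MSRs it actually set, so a short count identifies
     * the first entry that failed.
     */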
2682static int kvm_buf_set_msrs(X86CPU *cpu)
2683{
2684    int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
2685    if (ret < 0) {
2686        return ret;
2687    }
2688
2689    if (ret < cpu->kvm_msr_buf->nmsrs) {
2690        struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
2691        error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64,
2692                     (uint32_t)e->index, (uint64_t)e->data);
2693    }
2694
2695    assert(ret == cpu->kvm_msr_buf->nmsrs);
2696    return 0;
2697}
2698
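    /*
     * One-time MSR setup at vcpu creation: feature MSRs such as
     * ARCH_CAPABILITIES, CORE_CAPABILITY and UCODE_REV, plus the nested VMX
     * capability MSRs when VMX is exposed to the guest.
     */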
2699static void kvm_init_msrs(X86CPU *cpu)
2700{
2701    CPUX86State *env = &cpu->env;
2702
2703    kvm_msr_buf_reset(cpu);
2704    if (has_msr_arch_capabs) {
2705        kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES,
2706                          env->features[FEAT_ARCH_CAPABILITIES]);
2707    }
2708
2709    if (has_msr_core_capabs) {
2710        kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY,
2711                          env->features[FEAT_CORE_CAPABILITY]);
2712    }
2713
2714    if (has_msr_ucode_rev) {
2715        kvm_msr_entry_add(cpu, MSR_IA32_UCODE_REV, cpu->ucode_rev);
2716    }
2717
2718    /*
2719     * Older kernels do not include VMX MSRs in KVM_GET_MSR_INDEX_LIST, but
2720     * all kernels with MSR features should have them.
2721     */
2722    if (kvm_feature_msrs && cpu_has_vmx(env)) {
2723        kvm_msr_entry_add_vmx(cpu, env->features);
2724    }
2725
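        /*
         * QEMU refuses to build with NDEBUG, so the kvm_buf_set_msrs() call
         * inside this assert is never compiled out.
         */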
2726    assert(kvm_buf_set_msrs(cpu) == 0);
2727}
2728
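    /*
     * Write the runtime MSR state to KVM.  'level' controls whether the
     * heavyweight / side-effect MSRs further down are included; those are
     * only synced on reset or full state updates.
     */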
2729static int kvm_put_msrs(X86CPU *cpu, int level)
2730{
2731    CPUX86State *env = &cpu->env;
2732    int i;
2733
2734    kvm_msr_buf_reset(cpu);
2735
2736    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs);
2737    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
2738    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
2739    kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
2740    if (has_msr_star) {
2741        kvm_msr_entry_add(cpu, MSR_STAR, env->star);
2742    }
2743    if (has_msr_hsave_pa) {
2744        kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave);
2745    }
2746    if (has_msr_tsc_aux) {
2747        kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux);
2748    }
2749    if (has_msr_tsc_adjust) {
2750        kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust);
2751    }
2752    if (has_msr_misc_enable) {
2753        kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE,
2754                          env->msr_ia32_misc_enable);
2755    }
2756    if (has_msr_smbase) {
2757        kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
2758    }
2759    if (has_msr_smi_count) {
2760        kvm_msr_entry_add(cpu, MSR_SMI_COUNT, env->msr_smi_count);
2761    }
2762    if (has_msr_bndcfgs) {
2763        kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
2764    }
2765    if (has_msr_xss) {
2766        kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
2767    }
2768    if (has_msr_umwait) {
2769        kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, env->umwait);
2770    }
2771    if (has_msr_spec_ctrl) {
2772        kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl);
2773    }
2774    if (has_msr_tsx_ctrl) {
2775        kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, env->tsx_ctrl);
2776    }
2777    if (has_msr_virt_ssbd) {
2778        kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd);
2779    }
2780
2781#ifdef TARGET_X86_64
2782    if (lm_capable_kernel) {
2783        kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
2784        kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase);
2785        kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask);
2786        kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
2787    }
2788#endif
2789
2790    /*
2791     * The following MSRs have side effects on the guest or are too heavy
2792     * for normal writeback. Limit them to reset or full state updates.
2793     */
2794    if (level >= KVM_PUT_RESET_STATE) {
2795        kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
2796        kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
2797        kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
2798        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
2799            kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
2800        }
2801        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
2802            kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
2803        }
2804        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
2805            kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
2806        }
2807
2808        if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
2809            kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, env->poll_control_msr);
2810        }
2811
2812        if (has_architectural_pmu_version > 0) {
2813            if (has_architectural_pmu_version > 1) {
2814                /* Stop the counter.  */
2815                kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
2816                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
2817            }
2818
2819            /* Set the counter values.  */
2820            for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
2821                kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
2822                                  env->msr_fixed_counters[i]);
2823            }
2824            for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
2825                kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
2826                                  env->msr_gp_counters[i]);
2827                kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
2828                                  env->msr_gp_evtsel[i]);
2829            }
2830            if (has_architectural_pmu_version > 1) {
2831                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
2832                                  env->msr_global_status);
2833                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
2834                                  env->msr_global_ovf_ctrl);
2835
2836                /* Now start the PMU.  */
2837                kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
2838                                  env->msr_fixed_ctr_ctrl);
2839                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
2840                                  env->msr_global_ctrl);
2841            }
2842        }
2843        /*
2844         * Hyper-V partition-wide MSRs: to avoid clearing them on cpu hot-add,
2845         * only sync them to KVM on the first cpu
2846         */
2847        if (current_cpu == first_cpu) {
2848            if (has_msr_hv_hypercall) {
2849                kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
2850                                  env->msr_hv_guest_os_id);
2851                kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
2852                                  env->msr_hv_hypercall);
2853            }
2854            if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
2855                kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC,
2856                                  env->msr_hv_tsc);
2857            }
2858            if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
2859                kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL,
2860                                  env->msr_hv_reenlightenment_control);
2861                kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL,
2862                                  env->msr_hv_tsc_emulation_control);
2863                kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS,
2864                                  env->msr_hv_tsc_emulation_status);
2865            }
2866        }
2867        if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
2868            kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
2869                              env->msr_hv_vapic);
2870        }
2871        if (has_msr_hv_crash) {
2872            int j;
2873
2874            for (j = 0; j < HV_CRASH_PARAMS; j++) {
2875                kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
2876                                  env->msr_hv_crash_params[j]);
                }
2877
2878            kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_NOTIFY);
2879        }
2880        if (has_msr_hv_runtime) {
2881            kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
2882        }
2883        if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)
2884            && hv_vpindex_settable) {
2885            kvm_msr_entry_add(cpu, HV_X64_MSR_VP_INDEX,
2886                              hyperv_vp_index(CPU(cpu)));
2887        }
2888        if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
2889            int j;
2890
2891            kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, HV_SYNIC_VERSION);
2892
2893            kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
2894                              env->msr_hv_synic_control);
2895            kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
2896                              env->msr_hv_synic_evt_page);
2897            kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
2898                              env->msr_hv_synic_msg_page);
2899
2900            for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
2901                kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j,
2902                                  env->msr_hv_synic_sint[j]);
2903            }
2904        }
2905        if (has_msr_hv_stimer) {
2906            int j;
2907
2908            for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
2909                kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2,
2910                                env->msr_hv_stimer_config[j]);
2911            }
2912
2913            for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
2914                kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2,
2915                                env->msr_hv_stimer_count[j]);
2916            }
2917        }
2918        if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
2919            uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits);
2920
2921            kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
2922            kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
2923            kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
2924            kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
2925            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
2926            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
2927            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
2928            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
2929            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
2930            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
2931            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
2932            kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
2933            for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
2934                /* The CPU GPs if we write to a bit above the physical limit of
2935                 * the host CPU (and KVM emulates that)
2936                 */
2937                uint64_t mask = env->mtrr_var[i].mask;
2938                mask &= phys_mask;
2939
2940                kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i),
2941                                  env->mtrr_var[i].base);
2942                kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask);
2943            }
2944        }
2945        if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
2946            int addr_num = kvm_arch_get_supported_cpuid(kvm_state,
2947                                                    0x14, 1, R_EAX) & 0x7;
2948
2949            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL,
2950                            env->msr_rtit_ctrl);
2951            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS,
2952                            env->msr_rtit_status);
2953            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE,
2954                            env->msr_rtit_output_base);
2955            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK,
2956                            env->msr_rtit_output_mask);
2957            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH,
2958                            env->msr_rtit_cr3_match);
2959            for (i = 0; i < addr_num; i++) {
2960                kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i,
2961                            env->msr_rtit_addrs[i]);
2962            }
2963        }
2964
2965        /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
2966         *       kvm_put_msr_feature_control. */
2967    }
2968
2969    if (env->mcg_cap) {
2970        int i;
2971
2972        kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
2973        kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
2974        if (has_msr_mcg_ext_ctl) {
2975            kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl);
2976        }
2977        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
2978            kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]);
2979        }
2980    }
2981
2982    return kvm_buf_set_msrs(cpu);
2983}
2984
2985
2986static int kvm_get_fpu(X86CPU *cpu)
2987{
2988    CPUX86State *env = &cpu->env;
2989    struct kvm_fpu fpu;
2990    int i, ret;
2991
2992    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
2993    if (ret < 0) {
2994        return ret;
2995    }
2996
2997    env->fpstt = (fpu.fsw >> 11) & 7;
2998    env->fpus = fpu.fsw;
2999    env->fpuc = fpu.fcw;
3000    env->fpop = fpu.last_opcode;
3001    env->fpip = fpu.last_ip;
3002    env->fpdp = fpu.last_dp;
3003    for (i = 0; i < 8; ++i) {
3004        env->fptags[i] = !((fpu.ftwx >> i) & 1);
3005    }
3006    memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
3007    for (i = 0; i < CPU_NB_REGS; i++) {
3008        env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
3009        env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
3010    }
3011    env->mxcsr = fpu.mxcsr;
3012
3013    return 0;
3014}
3015
3016static int kvm_get_xsave(X86CPU *cpu)
3017{
3018    CPUX86State *env = &cpu->env;
3019    X86XSaveArea *xsave = env->xsave_buf;
3020    int ret;
3021
3022    if (!has_xsave) {
3023        return kvm_get_fpu(cpu);
3024    }
3025
3026    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave);
3027    if (ret < 0) {
3028        return ret;
3029    }
3030    x86_cpu_xrstor_all_areas(cpu, xsave);
3031
3032    return 0;
3033}
3034
3035static int kvm_get_xcrs(X86CPU *cpu)
3036{
3037    CPUX86State *env = &cpu->env;
3038    int i, ret;
3039    struct kvm_xcrs xcrs;
3040
3041    if (!has_xcrs) {
3042        return 0;
3043    }
3044
3045    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
3046    if (ret < 0) {
3047        return ret;
3048    }
3049
3050    for (i = 0; i < xcrs.nr_xcrs; i++) {
3051        /* Only support xcr0 now */
3052        if (xcrs.xcrs[i].xcr == 0) {
3053            env->xcr0 = xcrs.xcrs[i].value;
3054            break;
3055        }
3056    }
3057    return 0;
3058}
3059
3060static int kvm_get_sregs(X86CPU *cpu)
3061{
3062    CPUX86State *env = &cpu->env;
3063    struct kvm_sregs sregs;
3064    int bit, i, ret;
3065
3066    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
3067    if (ret < 0) {
3068        return ret;
3069    }
3070
3071    /* There can only be one pending IRQ set in the bitmap at a time, so try
3072       to find it and save its number instead (-1 for none). */
3073    env->interrupt_injected = -1;
3074    for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
3075        if (sregs.interrupt_bitmap[i]) {
3076            bit = ctz64(sregs.interrupt_bitmap[i]);
3077            env->interrupt_injected = i * 64 + bit;
3078            break;
3079        }
3080    }
3081
3082    get_seg(&env->segs[R_CS], &sregs.cs);
3083    get_seg(&env->segs[R_DS], &sregs.ds);
3084    get_seg(&env->segs[R_ES], &sregs.es);
3085    get_seg(&env->segs[R_FS], &sregs.fs);
3086    get_seg(&env->segs[R_GS], &sregs.gs);
3087    get_seg(&env->segs[R_SS], &sregs.ss);
3088
3089    get_seg(&env->tr, &sregs.tr);
3090    get_seg(&env->ldt, &sregs.ldt);
3091
3092    env->idt.limit = sregs.idt.limit;
3093    env->idt.base = sregs.idt.base;
3094    env->gdt.limit = sregs.gdt.limit;
3095    env->gdt.base = sregs.gdt.base;
3096
3097    env->cr[0] = sregs.cr0;
3098    env->cr[2] = sregs.cr2;
3099    env->cr[3] = sregs.cr3;
3100    env->cr[4] = sregs.cr4;
3101
3102    env->efer = sregs.efer;
3103
3104    /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
3105    x86_update_hflags(env);
3106
3107    return 0;
3108}
3109
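    /*
     * Read the MSR state back from KVM: the buffer is filled with the same
     * set of indices that kvm_put_msrs() writes (with placeholder values),
     * then a single KVM_GET_MSRS call retrieves the data.
     */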
3110static int kvm_get_msrs(X86CPU *cpu)
3111{
3112    CPUX86State *env = &cpu->env;
3113    struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
3114    int ret, i;
3115    uint64_t mtrr_top_bits;
3116
3117    kvm_msr_buf_reset(cpu);
3118
3119    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0);
3120    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0);
3121    kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0);
3122    kvm_msr_entry_add(cpu, MSR_PAT, 0);
3123    if (has_msr_star) {
3124        kvm_msr_entry_add(cpu, MSR_STAR, 0);
3125    }
3126    if (has_msr_hsave_pa) {
3127        kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0);
3128    }
3129    if (has_msr_tsc_aux) {
3130        kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0);
3131    }
3132    if (has_msr_tsc_adjust) {
3133        kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0);
3134    }
3135    if (has_msr_tsc_deadline) {
3136        kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0);
3137    }
3138    if (has_msr_misc_enable) {
3139        kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0);
3140    }
3141    if (has_msr_smbase) {
3142        kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
3143    }
3144    if (has_msr_smi_count) {
3145        kvm_msr_entry_add(cpu, MSR_SMI_COUNT, 0);
3146    }
3147    if (has_msr_feature_control) {
3148        kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
3149    }
3150    if (has_msr_bndcfgs) {
3151        kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0);
3152    }
3153    if (has_msr_xss) {
3154        kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
3155    }
3156    if (has_msr_umwait) {
3157        kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, 0);
3158    }
3159    if (has_msr_spec_ctrl) {
3160        kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0);
3161    }
3162    if (has_msr_tsx_ctrl) {
3163        kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, 0);
3164    }
3165    if (has_msr_virt_ssbd) {
3166        kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, 0);
3167    }
3168    if (!env->tsc_valid) {
3169        kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
3170        env->tsc_valid = !runstate_is_running();
3171    }
3172
3173#ifdef TARGET_X86_64
3174    if (lm_capable_kernel) {
3175        kvm_msr_entry_add(cpu, MSR_CSTAR, 0);
3176        kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0);
3177        kvm_msr_entry_add(cpu, MSR_FMASK, 0);
3178        kvm_msr_entry_add(cpu, MSR_LSTAR, 0);
3179    }
3180#endif
3181    kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
3182    kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
3183    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
3184        kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
3185    }
3186    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
3187        kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
3188    }
3189    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
3190        kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
3191    }
3192    if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) {
3193        kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1);
3194    }
3195    if (has_architectural_pmu_version > 0) {
3196        if (has_architectural_pmu_version > 1) {
3197            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
3198            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
3199            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
3200            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
3201        }
3202        for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
3203            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
3204        }
3205        for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
3206            kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
3207            kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
3208        }
3209    }
3210
3211    if (env->mcg_cap) {
3212        kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
3213        kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
3214        if (has_msr_mcg_ext_ctl) {
3215            kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0);
3216        }
3217        for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
3218            kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
3219        }
3220    }
3221
3222    if (has_msr_hv_hypercall) {
3223        kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
3224        kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
3225    }
3226    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
3227        kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
3228    }
3229    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
3230        kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
3231    }
3232    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
3233        kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0);
3234        kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 0);
3235        kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 0);
3236    }
3237    if (has_msr_hv_crash) {
3238        int j;
3239
3240        for (j = 0; j < HV_CRASH_PARAMS; j++) {
3241            kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
3242        }
3243    }
3244    if (has_msr_hv_runtime) {
3245        kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
3246    }
3247    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
3248        uint32_t msr;
3249
3250        kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
3251        kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
3252        kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
3253        for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
3254            kvm_msr_entry_add(cpu, msr, 0);
3255        }
3256    }
3257    if (has_msr_hv_stimer) {
3258        uint32_t msr;
3259
3260        for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
3261             msr++) {
3262            kvm_msr_entry_add(cpu, msr, 0);
3263        }
3264    }
3265    if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
3266        kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
3267        kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
3268        kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
3269        kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
3270        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
3271        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
3272        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
3273        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
3274        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
3275        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
3276        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
3277        kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
3278        for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
3279            kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
3280            kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
3281        }
3282    }
3283
3284    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
3285        int addr_num =
3286            kvm_arch_get_supported_cpuid(kvm_state, 0x14, 1, R_EAX) & 0x7;
3287
3288        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 0);
3289        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 0);
3290        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 0);
3291        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 0);
3292        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 0);
3293        for (i = 0; i < addr_num; i++) {
3294            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 0);
3295        }
3296    }
3297
3298    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
3299    if (ret < 0) {
3300        return ret;
3301    }
3302
3303    if (ret < cpu->kvm_msr_buf->nmsrs) {
3304        struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret];
3305        error_report("error: failed to get MSR 0x%" PRIx32,
3306                     (uint32_t)e->index);
3307    }
3308
3309    assert(ret == cpu->kvm_msr_buf->nmsrs);
3310    /*
3311     * MTRR masks: Each mask consists of 5 parts
3312     * a  10..0: must be zero
3313     * b  11   : valid bit
3314     * c n-1..12: actual mask bits
3315     * d  51..n: reserved, must be zero
3316     * e  63..52: reserved, must be zero
3317     *
3318     * 'n' is the number of physical bits supported by the CPU and is
3319     * apparently always <= 52.  We know our 'n' but don't know what
3320     * the destination's 'n' is; it might be smaller, in which case
3321     * it masks (c) on loading. It might be larger, in which case
3322     * we fill 'd' so that d..c is consistent irrespective of the 'n'
3323     * we're migrating to.
3324     */
3325
3326    if (cpu->fill_mtrr_mask) {
3327        QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52);
3328        assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS);
3329        mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits);
3330    } else {
3331        mtrr_top_bits = 0;
3332    }
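    /*
     * For illustration: with cpu->phys_bits == 40 this is
     * MAKE_64BIT_MASK(40, 12) == 0x000fff0000000000, i.e. bits 51..40 set.
     * These bits are ORed into each MTRRphysMask value below so that part
     * (d) of the mask is filled for the destination.
     */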
3333
3334    for (i = 0; i < ret; i++) {
3335        uint32_t index = msrs[i].index;
3336        switch (index) {
3337        case MSR_IA32_SYSENTER_CS:
3338            env->sysenter_cs = msrs[i].data;
3339            break;
3340        case MSR_IA32_SYSENTER_ESP:
3341            env->sysenter_esp = msrs[i].data;
3342            break;
3343        case MSR_IA32_SYSENTER_EIP:
3344            env->sysenter_eip = msrs[i].data;
3345            break;
3346        case MSR_PAT:
3347            env->pat = msrs[i].data;
3348            break;
3349        case MSR_STAR:
3350            env->star = msrs[i].data;
3351            break;
3352#ifdef TARGET_X86_64
3353        case MSR_CSTAR:
3354            env->cstar = msrs[i].data;
3355            break;
3356        case MSR_KERNELGSBASE:
3357            env->kernelgsbase = msrs[i].data;
3358            break;
3359        case MSR_FMASK:
3360            env->fmask = msrs[i].data;
3361            break;
3362        case MSR_LSTAR:
3363            env->lstar = msrs[i].data;
3364            break;
3365#endif
3366        case MSR_IA32_TSC:
3367            env->tsc = msrs[i].data;
3368            break;
3369        case MSR_TSC_AUX:
3370            env->tsc_aux = msrs[i].data;
3371            break;
3372        case MSR_TSC_ADJUST:
3373            env->tsc_adjust = msrs[i].data;
3374            break;
3375        case MSR_IA32_TSCDEADLINE:
3376            env->tsc_deadline = msrs[i].data;
3377            break;
3378        case MSR_VM_HSAVE_PA:
3379            env->vm_hsave = msrs[i].data;
3380            break;
3381        case MSR_KVM_SYSTEM_TIME:
3382            env->system_time_msr = msrs[i].data;
3383            break;
3384        case MSR_KVM_WALL_CLOCK:
3385            env->wall_clock_msr = msrs[i].data;
3386            break;
3387        case MSR_MCG_STATUS:
3388            env->mcg_status = msrs[i].data;
3389            break;
3390        case MSR_MCG_CTL:
3391            env->mcg_ctl = msrs[i].data;
3392            break;
3393        case MSR_MCG_EXT_CTL:
3394            env->mcg_ext_ctl = msrs[i].data;
3395            break;
3396        case MSR_IA32_MISC_ENABLE:
3397            env->msr_ia32_misc_enable = msrs[i].data;
3398            break;
3399        case MSR_IA32_SMBASE:
3400            env->smbase = msrs[i].data;
3401            break;
3402        case MSR_SMI_COUNT:
3403            env->msr_smi_count = msrs[i].data;
3404            break;
3405        case MSR_IA32_FEATURE_CONTROL:
3406            env->msr_ia32_feature_control = msrs[i].data;
3407            break;
3408        case MSR_IA32_BNDCFGS:
3409            env->msr_bndcfgs = msrs[i].data;
3410            break;
3411        case MSR_IA32_XSS:
3412            env->xss = msrs[i].data;
3413            break;
3414        case MSR_IA32_UMWAIT_CONTROL:
3415            env->umwait = msrs[i].data;
3416            break;
3417        default:
3418            if (msrs[i].index >= MSR_MC0_CTL &&
3419                msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
3420                env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
3421            }
3422            break;
3423        case MSR_KVM_ASYNC_PF_EN:
3424            env->async_pf_en_msr = msrs[i].data;
3425            break;
3426        case MSR_KVM_PV_EOI_EN:
3427            env->pv_eoi_en_msr = msrs[i].data;
3428            break;
3429        case MSR_KVM_STEAL_TIME:
3430            env->steal_time_msr = msrs[i].data;
3431            break;
3432        case MSR_KVM_POLL_CONTROL: {
3433            env->poll_control_msr = msrs[i].data;
3434            break;
3435        }
3436        case MSR_CORE_PERF_FIXED_CTR_CTRL:
3437            env->msr_fixed_ctr_ctrl = msrs[i].data;
3438            break;
3439        case MSR_CORE_PERF_GLOBAL_CTRL:
3440            env->msr_global_ctrl = msrs[i].data;
3441            break;
3442        case MSR_CORE_PERF_GLOBAL_STATUS:
3443            env->msr_global_status = msrs[i].data;
3444            break;
3445        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
3446            env->msr_global_ovf_ctrl = msrs[i].data;
3447            break;
3448        case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
3449            env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
3450            break;
3451        case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
3452            env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
3453            break;
3454        case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
3455            env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
3456            break;
3457        case HV_X64_MSR_HYPERCALL:
3458            env->msr_hv_hypercall = msrs[i].data;
3459            break;
3460        case HV_X64_MSR_GUEST_OS_ID:
3461            env->msr_hv_guest_os_id = msrs[i].data;
3462            break;
3463        case HV_X64_MSR_APIC_ASSIST_PAGE:
3464            env->msr_hv_vapic = msrs[i].data;
3465            break;
3466        case HV_X64_MSR_REFERENCE_TSC:
3467            env->msr_hv_tsc = msrs[i].data;
3468            break;
3469        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
3470            env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
3471            break;
3472        case HV_X64_MSR_VP_RUNTIME:
3473            env->msr_hv_runtime = msrs[i].data;
3474            break;
3475        case HV_X64_MSR_SCONTROL:
3476            env->msr_hv_synic_control = msrs[i].data;
3477            break;
3478        case HV_X64_MSR_SIEFP:
3479            env->msr_hv_synic_evt_page = msrs[i].data;
3480            break;
3481        case HV_X64_MSR_SIMP:
3482            env->msr_hv_synic_msg_page = msrs[i].data;
3483            break;
3484        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
3485            env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
3486            break;
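        /*
         * The synthetic timer MSRs interleave CONFIG and COUNT registers
         * (STIMER0_CONFIG, STIMER0_COUNT, STIMER1_CONFIG, ...), so dividing
         * the offset by two recovers the timer index.
         */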
3487        case HV_X64_MSR_STIMER0_CONFIG:
3488        case HV_X64_MSR_STIMER1_CONFIG:
3489        case HV_X64_MSR_STIMER2_CONFIG:
3490        case HV_X64_MSR_STIMER3_CONFIG:
3491            env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
3492                                msrs[i].data;
3493            break;
3494        case HV_X64_MSR_STIMER0_COUNT:
3495        case HV_X64_MSR_STIMER1_COUNT:
3496        case HV_X64_MSR_STIMER2_COUNT:
3497        case HV_X64_MSR_STIMER3_COUNT:
3498            env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
3499                                msrs[i].data;
3500            break;
3501        case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
3502            env->msr_hv_reenlightenment_control = msrs[i].data;
3503            break;
3504        case HV_X64_MSR_TSC_EMULATION_CONTROL:
3505            env->msr_hv_tsc_emulation_control = msrs[i].data;
3506            break;
3507        case HV_X64_MSR_TSC_EMULATION_STATUS:
3508            env->msr_hv_tsc_emulation_status = msrs[i].data;
3509            break;
3510        case MSR_MTRRdefType:
3511            env->mtrr_deftype = msrs[i].data;
3512            break;
3513        case MSR_MTRRfix64K_00000:
3514            env->mtrr_fixed[0] = msrs[i].data;
3515            break;
3516        case MSR_MTRRfix16K_80000:
3517            env->mtrr_fixed[1] = msrs[i].data;
3518            break;
3519        case MSR_MTRRfix16K_A0000:
3520            env->mtrr_fixed[2] = msrs[i].data;
3521            break;
3522        case MSR_MTRRfix4K_C0000:
3523            env->mtrr_fixed[3] = msrs[i].data;
3524            break;
3525        case MSR_MTRRfix4K_C8000:
3526            env->mtrr_fixed[4] = msrs[i].data;
3527            break;
3528        case MSR_MTRRfix4K_D0000:
3529            env->mtrr_fixed[5] = msrs[i].data;
3530            break;
3531        case MSR_MTRRfix4K_D8000:
3532            env->mtrr_fixed[6] = msrs[i].data;
3533            break;
3534        case MSR_MTRRfix4K_E0000:
3535            env->mtrr_fixed[7] = msrs[i].data;
3536            break;
3537        case MSR_MTRRfix4K_E8000:
3538            env->mtrr_fixed[8] = msrs[i].data;
3539            break;
3540        case MSR_MTRRfix4K_F0000:
3541            env->mtrr_fixed[9] = msrs[i].data;
3542            break;
3543        case MSR_MTRRfix4K_F8000:
3544            env->mtrr_fixed[10] = msrs[i].data;
3545            break;
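        /*
         * Variable-range MTRRs come as base/mask pairs at consecutive MSR
         * numbers, so an odd offset from MSR_MTRRphysBase(0) is a mask
         * register and an even offset is a base register.
         */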
3546        case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
3547            if (index & 1) {
3548                env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data |
3549                                                               mtrr_top_bits;
3550            } else {
3551                env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
3552            }
3553            break;
3554        case MSR_IA32_SPEC_CTRL:
3555            env->spec_ctrl = msrs[i].data;
3556            break;
3557        case MSR_IA32_TSX_CTRL:
3558            env->tsx_ctrl = msrs[i].data;
3559            break;
3560        case MSR_VIRT_SSBD:
3561            env->virt_ssbd = msrs[i].data;
3562            break;
3563        case MSR_IA32_RTIT_CTL:
3564            env->msr_rtit_ctrl = msrs[i].data;
3565            break;
3566        case MSR_IA32_RTIT_STATUS:
3567            env->msr_rtit_status = msrs[i].data;
3568            break;
3569        case MSR_IA32_RTIT_OUTPUT_BASE:
3570            env->msr_rtit_output_base = msrs[i].data;
3571            break;
3572        case MSR_IA32_RTIT_OUTPUT_MASK:
3573            env->msr_rtit_output_mask = msrs[i].data;
3574            break;
3575        case MSR_IA32_RTIT_CR3_MATCH:
3576            env->msr_rtit_cr3_match = msrs[i].data;
3577            break;
3578        case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
3579            env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data;
3580            break;
3581        }
3582    }
3583
3584    return 0;
3585}
3586
3587static int kvm_put_mp_state(X86CPU *cpu)
3588{
3589    struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };
3590
3591    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
3592}
3593
3594static int kvm_get_mp_state(X86CPU *cpu)
3595{
3596    CPUState *cs = CPU(cpu);
3597    CPUX86State *env = &cpu->env;
3598    struct kvm_mp_state mp_state;
3599    int ret;
3600
3601    ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
3602    if (ret < 0) {
3603        return ret;
3604    }
3605    env->mp_state = mp_state.mp_state;
3606    if (kvm_irqchip_in_kernel()) {
3607        cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
3608    }
3609    return 0;
3610}
3611
3612static int kvm_get_apic(X86CPU *cpu)
3613{
3614    DeviceState *apic = cpu->apic_state;
3615    struct kvm_lapic_state kapic;
3616    int ret;
3617
3618    if (apic && kvm_irqchip_in_kernel()) {
3619        ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
3620        if (ret < 0) {
3621            return ret;
3622        }
3623
3624        kvm_get_apic_state(apic, &kapic);
3625    }
3626    return 0;
3627}
3628
3629static int kvm_put_vcpu_events(X86CPU *cpu, int level)
3630{
3631    CPUState *cs = CPU(cpu);
3632    CPUX86State *env = &cpu->env;
3633    struct kvm_vcpu_events events = {};
3634
3635    if (!kvm_has_vcpu_events()) {
3636        return 0;
3637    }
3638
3639    events.flags = 0;
3640
3641    if (has_exception_payload) {
3642        events.flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
3643        events.exception.pending = env->exception_pending;
3644        events.exception_has_payload = env->exception_has_payload;
3645        events.exception_payload = env->exception_payload;
3646    }
3647    events.exception.nr = env->exception_nr;
3648    events.exception.injected = env->exception_injected;
3649    events.exception.has_error_code = env->has_error_code;
3650    events.exception.error_code = env->error_code;
3651
3652    events.interrupt.injected = (env->interrupt_injected >= 0);
3653    events.interrupt.nr = env->interrupt_injected;
3654    events.interrupt.soft = env->soft_interrupt;
3655
3656    events.nmi.injected = env->nmi_injected;
3657    events.nmi.pending = env->nmi_pending;
3658    events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
3659
3660    events.sipi_vector = env->sipi_vector;
3661
3662    if (has_msr_smbase) {
3663        events.smi.smm = !!(env->hflags & HF_SMM_MASK);
3664        events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
3665        if (kvm_irqchip_in_kernel()) {
3666            /* As soon as these are moved to the kernel, remove them
3667             * from cs->interrupt_request.
3668             */
3669            events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
3670            events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
3671            cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
3672        } else {
3673            /* Keep these in cs->interrupt_request.  */
3674            events.smi.pending = 0;
3675            events.smi.latched_init = 0;
3676        }
3677        /* Stop SMI delivery on old machine types to avoid a reboot
3678         * on an incoming migration of an old VM.
3679         */
3680        if (!cpu->kvm_no_smi_migration) {
3681            events.flags |= KVM_VCPUEVENT_VALID_SMM;
3682        }
3683    }
3684
3685    if (level >= KVM_PUT_RESET_STATE) {
3686        events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
3687        if (env->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
3688            events.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
3689        }
3690    }
3691
3692    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
3693}
3694
3695static int kvm_get_vcpu_events(X86CPU *cpu)
3696{
3697    CPUX86State *env = &cpu->env;
3698    struct kvm_vcpu_events events;
3699    int ret;
3700
3701    if (!kvm_has_vcpu_events()) {
3702        return 0;
3703    }
3704
3705    memset(&events, 0, sizeof(events));
3706    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
3707    if (ret < 0) {
3708        return ret;
3709    }
3710
3711    if (events.flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
3712        env->exception_pending = events.exception.pending;
3713        env->exception_has_payload = events.exception_has_payload;
3714        env->exception_payload = events.exception_payload;
3715    } else {
3716        env->exception_pending = 0;
3717        env->exception_has_payload = false;
3718    }
3719    env->exception_injected = events.exception.injected;
3720    env->exception_nr =
3721        (env->exception_pending || env->exception_injected) ?
3722        events.exception.nr : -1;
3723    env->has_error_code = events.exception.has_error_code;
3724    env->error_code = events.exception.error_code;
3725
3726    env->interrupt_injected =
3727        events.interrupt.injected ? events.interrupt.nr : -1;
3728    env->soft_interrupt = events.interrupt.soft;
3729
3730    env->nmi_injected = events.nmi.injected;
3731    env->nmi_pending = events.nmi.pending;
3732    if (events.nmi.masked) {
3733        env->hflags2 |= HF2_NMI_MASK;
3734    } else {
3735        env->hflags2 &= ~HF2_NMI_MASK;
3736    }
3737
3738    if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
3739        if (events.smi.smm) {
3740            env->hflags |= HF_SMM_MASK;
3741        } else {
3742            env->hflags &= ~HF_SMM_MASK;
3743        }
3744        if (events.smi.pending) {
3745            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
3746        } else {
3747            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
3748        }
3749        if (events.smi.smm_inside_nmi) {
3750            env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
3751        } else {
3752            env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
3753        }
3754        if (events.smi.latched_init) {
3755            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
3756        } else {
3757            cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
3758        }
3759    }
3760
3761    env->sipi_vector = events.sipi_vector;
3762
3763    return 0;
3764}
3765
3766static int kvm_guest_debug_workarounds(X86CPU *cpu)
3767{
3768    CPUState *cs = CPU(cpu);
3769    CPUX86State *env = &cpu->env;
3770    int ret = 0;
3771    unsigned long reinject_trap = 0;
3772
3773    if (!kvm_has_vcpu_events()) {
3774        if (env->exception_nr == EXCP01_DB) {
3775            reinject_trap = KVM_GUESTDBG_INJECT_DB;
3776        } else if (env->exception_injected == EXCP03_INT3) {
3777            reinject_trap = KVM_GUESTDBG_INJECT_BP;
3778        }
3779        kvm_reset_exception(env);
3780    }
3781
3782    /*
3783     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
3784     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
3785     * by updating the debug state once again if single-stepping is on.
3786     * Another reason to call kvm_update_guest_debug here is a pending debug
3787     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to
3788     * reinject them via SET_GUEST_DEBUG.
3789     */
3790    if (reinject_trap ||
3791        (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
3792        ret = kvm_update_guest_debug(cs, reinject_trap);
3793    }
3794    return ret;
3795}
3796
3797static int kvm_put_debugregs(X86CPU *cpu)
3798{
3799    CPUX86State *env = &cpu->env;
3800    struct kvm_debugregs dbgregs;
3801    int i;
3802
3803    if (!kvm_has_debugregs()) {
3804        return 0;
3805    }
3806
3807    memset(&dbgregs, 0, sizeof(dbgregs));
3808    for (i = 0; i < 4; i++) {
3809        dbgregs.db[i] = env->dr[i];
3810    }
3811    dbgregs.dr6 = env->dr[6];
3812    dbgregs.dr7 = env->dr[7];
3813    dbgregs.flags = 0;
3814
3815    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
3816}
3817
3818static int kvm_get_debugregs(X86CPU *cpu)
3819{
3820    CPUX86State *env = &cpu->env;
3821    struct kvm_debugregs dbgregs;
3822    int i, ret;
3823
3824    if (!kvm_has_debugregs()) {
3825        return 0;
3826    }
3827
3828    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
3829    if (ret < 0) {
3830        return ret;
3831    }
3832    for (i = 0; i < 4; i++) {
3833        env->dr[i] = dbgregs.db[i];
3834    }
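    /* DR4 and DR5 alias DR6 and DR7 (when CR4.DE is clear), so mirror them. */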
3835    env->dr[4] = env->dr[6] = dbgregs.dr6;
3836    env->dr[5] = env->dr[7] = dbgregs.dr7;
3837
3838    return 0;
3839}
3840
3841static int kvm_put_nested_state(X86CPU *cpu)
3842{
3843    CPUX86State *env = &cpu->env;
3844    int max_nested_state_len = kvm_max_nested_state_length();
3845
3846    if (!env->nested_state) {
3847        return 0;
3848    }
3849
3850    assert(env->nested_state->size <= max_nested_state_len);
3851    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_NESTED_STATE, env->nested_state);
3852}
3853
3854static int kvm_get_nested_state(X86CPU *cpu)
3855{
3856    CPUX86State *env = &cpu->env;
3857    int max_nested_state_len = kvm_max_nested_state_length();
3858    int ret;
3859
3860    if (!env->nested_state) {
3861        return 0;
3862    }
3863
3864    /*
3865     * It is possible that migration restored a smaller size into
3866     * nested_state->hdr.size than what our kernel supports.
3867     * We preserve the migration origin's nested_state->hdr.size for
3868     * the call to KVM_SET_NESTED_STATE, but want our next call
3869     * to KVM_GET_NESTED_STATE to use the maximum size our kernel supports.
3870     */
3871    env->nested_state->size = max_nested_state_len;
3872
3873    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_NESTED_STATE, env->nested_state);
3874    if (ret < 0) {
3875        return ret;
3876    }
3877
3878    if (env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE) {
3879        env->hflags |= HF_GUEST_MASK;
3880    } else {
3881        env->hflags &= ~HF_GUEST_MASK;
3882    }
3883
3884    return ret;
3885}
3886
3887int kvm_arch_put_registers(CPUState *cpu, int level)
3888{
3889    X86CPU *x86_cpu = X86_CPU(cpu);
3890    int ret;
3891
3892    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
3893
3894    if (level >= KVM_PUT_RESET_STATE) {
3895        ret = kvm_put_nested_state(x86_cpu);
3896        if (ret < 0) {
3897            return ret;
3898        }
3899
3900        ret = kvm_put_msr_feature_control(x86_cpu);
3901        if (ret < 0) {
3902            return ret;
3903        }
3904    }
3905
3906    if (level == KVM_PUT_FULL_STATE) {
3907        /* We don't check for kvm_arch_set_tsc_khz() errors here,
3908         * because TSC frequency mismatch shouldn't abort migration,
3909         * unless the user explicitly asked for a more strict TSC
3910         * setting (e.g. using an explicit "tsc-freq" option).
3911         */
3912        kvm_arch_set_tsc_khz(cpu);
3913    }
3914
3915    ret = kvm_getput_regs(x86_cpu, 1);
3916    if (ret < 0) {
3917        return ret;
3918    }
3919    ret = kvm_put_xsave(x86_cpu);
3920    if (ret < 0) {
3921        return ret;
3922    }
3923    ret = kvm_put_xcrs(x86_cpu);
3924    if (ret < 0) {
3925        return ret;
3926    }
3927    ret = kvm_put_sregs(x86_cpu);
3928    if (ret < 0) {
3929        return ret;
3930    }
3931    /* must be before kvm_put_msrs */
3932    ret = kvm_inject_mce_oldstyle(x86_cpu);
3933    if (ret < 0) {
3934        return ret;
3935    }
3936    ret = kvm_put_msrs(x86_cpu, level);
3937    if (ret < 0) {
3938        return ret;
3939    }
3940    ret = kvm_put_vcpu_events(x86_cpu, level);
3941    if (ret < 0) {
3942        return ret;
3943    }
3944    if (level >= KVM_PUT_RESET_STATE) {
3945        ret = kvm_put_mp_state(x86_cpu);
3946        if (ret < 0) {
3947            return ret;
3948        }
3949    }
3950
3951    ret = kvm_put_tscdeadline_msr(x86_cpu);
3952    if (ret < 0) {
3953        return ret;
3954    }
3955    ret = kvm_put_debugregs(x86_cpu);
3956    if (ret < 0) {
3957        return ret;
3958    }
3959    /* must be last */
3960    ret = kvm_guest_debug_workarounds(x86_cpu);
3961    if (ret < 0) {
3962        return ret;
3963    }
3964    return 0;
3965}
3966
3967int kvm_arch_get_registers(CPUState *cs)
3968{
3969    X86CPU *cpu = X86_CPU(cs);
3970    int ret;
3971
3972    assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));
3973
3974    ret = kvm_get_vcpu_events(cpu);
3975    if (ret < 0) {
3976        goto out;
3977    }
3978    /*
3979     * KVM_GET_MPSTATE can modify CS and RIP, call it before
3980     * KVM_GET_REGS and KVM_GET_SREGS.
3981     */
3982    ret = kvm_get_mp_state(cpu);
3983    if (ret < 0) {
3984        goto out;
3985    }
3986    ret = kvm_getput_regs(cpu, 0);
3987    if (ret < 0) {
3988        goto out;
3989    }
3990    ret = kvm_get_xsave(cpu);
3991    if (ret < 0) {
3992        goto out;
3993    }
3994    ret = kvm_get_xcrs(cpu);
3995    if (ret < 0) {
3996        goto out;
3997    }
3998    ret = kvm_get_sregs(cpu);
3999    if (ret < 0) {
4000        goto out;
4001    }
4002    ret = kvm_get_msrs(cpu);
4003    if (ret < 0) {
4004        goto out;
4005    }
4006    ret = kvm_get_apic(cpu);
4007    if (ret < 0) {
4008        goto out;
4009    }
4010    ret = kvm_get_debugregs(cpu);
4011    if (ret < 0) {
4012        goto out;
4013    }
4014    ret = kvm_get_nested_state(cpu);
4015    if (ret < 0) {
4016        goto out;
4017    }
4018    ret = 0;
4019 out:
4020    cpu_sync_bndcs_hflags(&cpu->env);
4021    return ret;
4022}
4023
4024void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
4025{
4026    X86CPU *x86_cpu = X86_CPU(cpu);
4027    CPUX86State *env = &x86_cpu->env;
4028    int ret;
4029
4030    /* Inject NMI */
4031    if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
4032        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
4033            qemu_mutex_lock_iothread();
4034            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
4035            qemu_mutex_unlock_iothread();
4036            DPRINTF("injected NMI\n");
4037            ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
4038            if (ret < 0) {
4039                fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
4040                        strerror(-ret));
4041            }
4042        }
4043        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
4044            qemu_mutex_lock_iothread();
4045            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
4046            qemu_mutex_unlock_iothread();
4047            DPRINTF("injected SMI\n");
4048            ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
4049            if (ret < 0) {
4050                fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
4051                        strerror(-ret));
4052            }
4053        }
4054    }
4055
4056    if (!kvm_pic_in_kernel()) {
4057        qemu_mutex_lock_iothread();
4058    }
4059
4060    /* Force the VCPU out of its inner loop to process any INIT requests
4061     * or (for userspace APIC, but it is cheap to combine the checks here)
4062     * pending TPR access reports.
4063     */
4064    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
4065        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
4066            !(env->hflags & HF_SMM_MASK)) {
4067            cpu->exit_request = 1;
4068        }
4069        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
4070            cpu->exit_request = 1;
4071        }
4072    }
4073
4074    if (!kvm_pic_in_kernel()) {
4075        /* Try to inject an interrupt if the guest can accept it */
4076        if (run->ready_for_interrupt_injection &&
4077            (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
4078            (env->eflags & IF_MASK)) {
4079            int irq;
4080
4081            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
4082            irq = cpu_get_pic_interrupt(env);
4083            if (irq >= 0) {
4084                struct kvm_interrupt intr;
4085
4086                intr.irq = irq;
4087                DPRINTF("injected interrupt %d\n", irq);
4088                ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
4089                if (ret < 0) {
4090                    fprintf(stderr,
4091                            "KVM: injection failed, interrupt lost (%s)\n",
4092                            strerror(-ret));
4093                }
4094            }
4095        }
4096
4097        /* If we have an interrupt but the guest is not ready to receive an
4098         * interrupt, request an interrupt window exit.  This will
4099         * cause a return to userspace as soon as the guest is ready to
4100         * receive interrupts. */
4101        if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
4102            run->request_interrupt_window = 1;
4103        } else {
4104            run->request_interrupt_window = 0;
4105        }
4106
4107        DPRINTF("setting tpr\n");
4108        run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);
4109
4110        qemu_mutex_unlock_iothread();
4111    }
4112}
4113
4114MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
4115{
4116    X86CPU *x86_cpu = X86_CPU(cpu);
4117    CPUX86State *env = &x86_cpu->env;
4118
4119    if (run->flags & KVM_RUN_X86_SMM) {
4120        env->hflags |= HF_SMM_MASK;
4121    } else {
4122        env->hflags &= ~HF_SMM_MASK;
4123    }
4124    if (run->if_flag) {
4125        env->eflags |= IF_MASK;
4126    } else {
4127        env->eflags &= ~IF_MASK;
4128    }
4129
4130    /* We need to protect the apic state against concurrent accesses from
4131     * different threads in case the userspace irqchip is used. */
4132    if (!kvm_irqchip_in_kernel()) {
4133        qemu_mutex_lock_iothread();
4134    }
4135    cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
4136    cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
4137    if (!kvm_irqchip_in_kernel()) {
4138        qemu_mutex_unlock_iothread();
4139    }
4140    return cpu_get_mem_attrs(env);
4141}
4142
4143int kvm_arch_process_async_events(CPUState *cs)
4144{
4145    X86CPU *cpu = X86_CPU(cs);
4146    CPUX86State *env = &cpu->env;
4147
4148    if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
4149        /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
4150        assert(env->mcg_cap);
4151
4152        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
4153
4154        kvm_cpu_synchronize_state(cs);
4155
4156        if (env->exception_nr == EXCP08_DBLE) {
4157            /* this means triple fault */
4158            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
4159            cs->exit_request = 1;
4160            return 0;
4161        }
4162        kvm_queue_exception(env, EXCP12_MCHK, 0, 0);
4163        env->has_error_code = 0;
4164
4165        cs->halted = 0;
4166        if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
4167            env->mp_state = KVM_MP_STATE_RUNNABLE;
4168        }
4169    }
4170
4171    if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
4172        !(env->hflags & HF_SMM_MASK)) {
4173        kvm_cpu_synchronize_state(cs);
4174        do_cpu_init(cpu);
4175    }
4176
4177    if (kvm_irqchip_in_kernel()) {
4178        return 0;
4179    }
4180
4181    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
4182        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
4183        apic_poll_irq(cpu->apic_state);
4184    }
4185    if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
4186         (env->eflags & IF_MASK)) ||
4187        (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
4188        cs->halted = 0;
4189    }
4190    if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
4191        kvm_cpu_synchronize_state(cs);
4192        do_cpu_sipi(cpu);
4193    }
4194    if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
4195        cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
4196        kvm_cpu_synchronize_state(cs);
4197        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
4198                                      env->tpr_access_type);
4199    }
4200
4201    return cs->halted;
4202}
4203
4204static int kvm_handle_halt(X86CPU *cpu)
4205{
4206    CPUState *cs = CPU(cpu);
4207    CPUX86State *env = &cpu->env;
4208
4209    if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
4210          (env->eflags & IF_MASK)) &&
4211        !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
4212        cs->halted = 1;
4213        return EXCP_HLT;
4214    }
4215
4216    return 0;
4217}
4218
4219static int kvm_handle_tpr_access(X86CPU *cpu)
4220{
4221    CPUState *cs = CPU(cpu);
4222    struct kvm_run *run = cs->kvm_run;
4223
4224    apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
4225                                  run->tpr_access.is_write ? TPR_ACCESS_WRITE
4226                                                           : TPR_ACCESS_READ);
4227    return 1;
4228}
4229
4230int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
4231{
4232    static const uint8_t int3 = 0xcc;
4233
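    /*
     * Save the original byte at the breakpoint address, then patch in 0xcc,
     * the one-byte INT3 opcode.
     */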
4234    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
4235        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
4236        return -EINVAL;
4237    }
4238    return 0;
4239}
4240
4241int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
4242{
4243    uint8_t int3;
4244
4245    if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
4246        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
4247        return -EINVAL;
4248    }
4249    return 0;
4250}
4251
4252static struct {
4253    target_ulong addr;
4254    int len;
4255    int type;
4256} hw_breakpoint[4];
4257
4258static int nb_hw_breakpoint;
4259
4260static int find_hw_breakpoint(target_ulong addr, int len, int type)
4261{
4262    int n;
4263
4264    for (n = 0; n < nb_hw_breakpoint; n++) {
4265        if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
4266            (hw_breakpoint[n].len == len || len == -1)) {
4267            return n;
4268        }
4269    }
4270    return -1;
4271}
4272
4273int kvm_arch_insert_hw_breakpoint(target_ulong addr,
4274                                  target_ulong len, int type)
4275{
4276    switch (type) {
4277    case GDB_BREAKPOINT_HW:
4278        len = 1;
4279        break;
4280    case GDB_WATCHPOINT_WRITE:
4281    case GDB_WATCHPOINT_ACCESS:
4282        switch (len) {
4283        case 1:
4284            break;
4285        case 2:
4286        case 4:
4287        case 8:
4288            if (addr & (len - 1)) {
4289                return -EINVAL;
4290            }
4291            break;
4292        default:
4293            return -EINVAL;
4294        }
4295        break;
4296    default:
4297        return -ENOSYS;
4298    }
4299
4300    if (nb_hw_breakpoint == 4) {
4301        return -ENOBUFS;
4302    }
4303    if (find_hw_breakpoint(addr, len, type) >= 0) {
4304        return -EEXIST;
4305    }
4306    hw_breakpoint[nb_hw_breakpoint].addr = addr;
4307    hw_breakpoint[nb_hw_breakpoint].len = len;
4308    hw_breakpoint[nb_hw_breakpoint].type = type;
4309    nb_hw_breakpoint++;
4310
4311    return 0;
4312}
4313
4314int kvm_arch_remove_hw_breakpoint(target_ulong addr,
4315                                  target_ulong len, int type)
4316{
4317    int n;
4318
4319    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
4320    if (n < 0) {
4321        return -ENOENT;
4322    }
4323    nb_hw_breakpoint--;
4324    hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];
4325
4326    return 0;
4327}
4328
4329void kvm_arch_remove_all_hw_breakpoints(void)
4330{
4331    nb_hw_breakpoint = 0;
4332}
4333
4334static CPUWatchpoint hw_watchpoint;
4335
4336static int kvm_handle_debug(X86CPU *cpu,
4337                            struct kvm_debug_exit_arch *arch_info)
4338{
4339    CPUState *cs = CPU(cpu);
4340    CPUX86State *env = &cpu->env;
4341    int ret = 0;
4342    int n;
4343
4344    if (arch_info->exception == EXCP01_DB) {
4345        if (arch_info->dr6 & DR6_BS) {
4346            if (cs->singlestep_enabled) {
4347                ret = EXCP_DEBUG;
4348            }
4349        } else {
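            /*
             * DR6 bits 0..3 report which hardware breakpoint fired; the
             * matching R/Wn field in DR7 (two bits at 16 + n*4) says whether
             * it was an execution (0), write (1) or read/write (3)
             * breakpoint.
             */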
4350            for (n = 0; n < 4; n++) {
4351                if (arch_info->dr6 & (1 << n)) {
4352                    switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
4353                    case 0x0:
4354                        ret = EXCP_DEBUG;
4355                        break;
4356                    case 0x1:
4357                        ret = EXCP_DEBUG;
4358                        cs->watchpoint_hit = &hw_watchpoint;
4359                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
4360                        hw_watchpoint.flags = BP_MEM_WRITE;
4361                        break;
4362                    case 0x3:
4363                        ret = EXCP_DEBUG;
4364                        cs->watchpoint_hit = &hw_watchpoint;
4365                        hw_watchpoint.vaddr = hw_breakpoint[n].addr;
4366                        hw_watchpoint.flags = BP_MEM_ACCESS;
4367                        break;
4368                    }
4369                }
4370            }
4371        }
4372    } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
4373        ret = EXCP_DEBUG;
4374    }
4375    if (ret == 0) {
4376        cpu_synchronize_state(cs);
4377        assert(env->exception_nr == -1);
4378
4379        /* pass to guest */
4380        kvm_queue_exception(env, arch_info->exception,
4381                            arch_info->exception == EXCP01_DB,
4382                            arch_info->dr6);
4383        env->has_error_code = 0;
4384    }
4385
4386    return ret;
4387}
4388
4389void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
4390{
4391    const uint8_t type_code[] = {
4392        [GDB_BREAKPOINT_HW] = 0x0,
4393        [GDB_WATCHPOINT_WRITE] = 0x1,
4394        [GDB_WATCHPOINT_ACCESS] = 0x3
4395    };
4396    const uint8_t len_code[] = {
4397        [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
4398    };
4399    int n;
4400
4401    if (kvm_sw_breakpoints_active(cpu)) {
4402        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
4403    }
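    /*
     * Encode the hardware breakpoints into DR7: 0x0600 sets GE plus the
     * reserved bit 10 that must be written as 1, (2 << (n * 2)) sets the
     * global-enable bit for slot n, and the type/length codes land in the
     * R/Wn and LENn fields at bits 16 + n*4 and 18 + n*4.  For illustration,
     * a 4-byte write watchpoint in slot 0 gives
     * 0x0600 | 0x2 | (0x1 << 16) | (0x3 << 18) == 0xd0602.
     */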
4404    if (nb_hw_breakpoint > 0) {
4405        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
4406        dbg->arch.debugreg[7] = 0x0600;
4407        for (n = 0; n < nb_hw_breakpoint; n++) {
4408            dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
4409            dbg->arch.debugreg[7] |= (2 << (n * 2)) |
4410                (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
4411                ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
4412        }
4413    }
4414}
4415
4416static bool host_supports_vmx(void)
4417{
4418    uint32_t ecx, unused;
4419
4420    host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
4421    return ecx & CPUID_EXT_VMX;
4422}
4423
4424#define VMX_INVALID_GUEST_STATE 0x80000021
4425
4426int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
4427{
4428    X86CPU *cpu = X86_CPU(cs);
4429    uint64_t code;
4430    int ret;
4431
4432    switch (run->exit_reason) {
4433    case KVM_EXIT_HLT:
4434        DPRINTF("handle_hlt\n");
4435        qemu_mutex_lock_iothread();
4436        ret = kvm_handle_halt(cpu);
4437        qemu_mutex_unlock_iothread();
4438        break;
4439    case KVM_EXIT_SET_TPR:
4440        ret = 0;
4441        break;
4442    case KVM_EXIT_TPR_ACCESS:
4443        qemu_mutex_lock_iothread();
4444        ret = kvm_handle_tpr_access(cpu);
4445        qemu_mutex_unlock_iothread();
4446        break;
4447    case KVM_EXIT_FAIL_ENTRY:
4448        code = run->fail_entry.hardware_entry_failure_reason;
4449        fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
4450                code);
4451        if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
4452            fprintf(stderr,
4453                    "\nIf you're running a guest on an Intel machine without "
4454                        "unrestricted mode\n"
4455                    "support, the failure can be most likely due to the guest "
4456                        "entering an invalid\n"
4457                    "state for Intel VT. For example, the guest maybe running "
4458                        "in big real mode\n"
4459                    "which is not supported on less recent Intel processors."
4460                        "\n\n");
4461        }
4462        ret = -1;
4463        break;
4464    case KVM_EXIT_EXCEPTION:
4465        fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
4466                run->ex.exception, run->ex.error_code);
4467        ret = -1;
4468        break;
4469    case KVM_EXIT_DEBUG:
4470        DPRINTF("kvm_exit_debug\n");
4471        qemu_mutex_lock_iothread();
4472        ret = kvm_handle_debug(cpu, &run->debug.arch);
4473        qemu_mutex_unlock_iothread();
4474        break;
4475    case KVM_EXIT_HYPERV:
4476        ret = kvm_hv_handle_exit(cpu, &run->hyperv);
4477        break;
4478    case KVM_EXIT_IOAPIC_EOI:
4479        ioapic_eoi_broadcast(run->eoi.vector);
4480        ret = 0;
4481        break;
4482    default:
4483        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
4484        ret = -1;
4485        break;
4486    }
4487
4488    return ret;
4489}
4490
4491bool kvm_arch_stop_on_emulation_error(CPUState *cs)
4492{
4493    X86CPU *cpu = X86_CPU(cs);
4494    CPUX86State *env = &cpu->env;
4495
4496    kvm_cpu_synchronize_state(cs);
4497    return !(env->cr[0] & CR0_PE_MASK) ||
4498           ((env->segs[R_CS].selector  & 3) != 3);
4499}
4500
4501void kvm_arch_init_irq_routing(KVMState *s)
4502{
4503    if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
4504        /* If the kernel can't do IRQ routing, the interrupt source
4505         * override 0->2 required by HPET cannot be set up.
4506         * So we have to disable HPET.
4507         */
4508        no_hpet = 1;
4509    }
4510    /* We know at this point that we're using the in-kernel
4511     * irqchip, so we can use irqfds, and on x86 we know
4512     * we can use msi via irqfd and GSI routing.
4513     */
4514    kvm_msi_via_irqfd_allowed = true;
4515    kvm_gsi_routing_allowed = true;
4516
4517    if (kvm_irqchip_is_split()) {
4518        int i;
4519
4520        /* If the ioapic is in QEMU and the lapics are in KVM, reserve
4521           MSI routes for signaling interrupts to the local apics. */
4522        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
4523            if (kvm_irqchip_add_msi_route(s, 0, NULL) < 0) {
4524                error_report("Could not enable split IRQ mode.");
4525                exit(1);
4526            }
4527        }
4528    }
4529}
4530
4531int kvm_arch_irqchip_create(KVMState *s)
4532{
4533    int ret;
4534    if (kvm_kernel_irqchip_split()) {
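        /*
         * The final argument (24) asks KVM to reserve that many GSI routes
         * for the userspace IOAPIC, one per IOAPIC pin.
         */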
4535        ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
4536        if (ret) {
4537            error_report("Could not enable split irqchip mode: %s",
4538                         strerror(-ret));
4539            exit(1);
4540        } else {
4541            DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
4542            kvm_split_irqchip = true;
4543            return 1;
4544        }
4545    } else {
4546        return 0;
4547    }
4548}
4549
4550int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
4551                             uint64_t address, uint32_t data, PCIDevice *dev)
4552{
4553    X86IOMMUState *iommu = x86_iommu_get_default();
4554
4555    if (iommu) {
4556        int ret;
4557        MSIMessage src, dst;
4558        X86IOMMUClass *class = X86_IOMMU_GET_CLASS(iommu);
4559
4560        if (!class->int_remap) {
4561            return 0;
4562        }
4563
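        /*
         * Rebuild the 64-bit MSI address from the route's high and low
         * halves before handing the message to the IOMMU interrupt remapper.
         */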
4564        src.address = route->u.msi.address_hi;
4565        src.address <<= VTD_MSI_ADDR_HI_SHIFT;
4566        src.address |= route->u.msi.address_lo;
4567        src.data = route->u.msi.data;
4568
4569        ret = class->int_remap(iommu, &src, &dst, dev ? \
4570                               pci_requester_id(dev) : \
4571                               X86_IOMMU_SID_INVALID);
4572        if (ret) {
4573            trace_kvm_x86_fixup_msi_error(route->gsi);
4574            return 1;
4575        }
4576
4577        route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT;
4578        route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK;
4579        route->u.msi.data = dst.data;
4580    }
4581
4582    return 0;
4583}
4584
4585typedef struct MSIRouteEntry MSIRouteEntry;
4586
4587struct MSIRouteEntry {
4588    PCIDevice *dev;             /* Device pointer */
4589    int vector;                 /* MSI/MSIX vector index */
4590    int virq;                   /* Virtual IRQ index */
4591    QLIST_ENTRY(MSIRouteEntry) list;
4592};
4593
4594/* List of used GSI routes */
4595static QLIST_HEAD(, MSIRouteEntry) msi_route_list = \
4596    QLIST_HEAD_INITIALIZER(msi_route_list);
4597
4598static void kvm_update_msi_routes_all(void *private, bool global,
4599                                      uint32_t index, uint32_t mask)
4600{
4601    int cnt = 0, vector;
4602    MSIRouteEntry *entry;
4603    MSIMessage msg;
4604    PCIDevice *dev;
4605
4606    /* TODO: explicit route update */
4607    QLIST_FOREACH(entry, &msi_route_list, list) {
4608        cnt++;
4609        vector = entry->vector;
4610        dev = entry->dev;
4611        if (msix_enabled(dev) && !msix_is_masked(dev, vector)) {
4612            msg = msix_get_message(dev, vector);
4613        } else if (msi_enabled(dev) && !msi_is_masked(dev, vector)) {
4614            msg = msi_get_message(dev, vector);
4615        } else {
4616            /*
4617             * Either MSI/MSIX is disabled for the device, or the
4618             * specific message was masked out.  Skip this one.
4619             */
4620            continue;
4621        }
4622        kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev);
4623    }
4624    kvm_irqchip_commit_routes(kvm_state);
4625    trace_kvm_x86_update_msi_routes(cnt);
4626}
4627
4628int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
4629                                int vector, PCIDevice *dev)
4630{
4631    static bool notify_list_inited = false;
4632    MSIRouteEntry *entry;
4633
4634    if (!dev) {
4635        /* These are (possibly) IOAPIC routes, which are only used in
4636         * split kernel irqchip mode; here we only keep track of
4637         * PCI devices. */
4638        return 0;
4639    }
4640
4641    entry = g_new0(MSIRouteEntry, 1);
4642    entry->dev = dev;
4643    entry->vector = vector;
4644    entry->virq = route->gsi;
4645    QLIST_INSERT_HEAD(&msi_route_list, entry, list);
4646
4647    trace_kvm_x86_add_msi_route(route->gsi);
4648
4649    if (!notify_list_inited) {
4650        /* The first time we add a route, register ourselves in the
4651         * IOMMU's IEC notifier list if needed. */
4652        X86IOMMUState *iommu = x86_iommu_get_default();
4653        if (iommu) {
4654            x86_iommu_iec_register_notifier(iommu,
4655                                            kvm_update_msi_routes_all,
4656                                            NULL);
4657        }
4658        notify_list_inited = true;
4659    }
4660    return 0;
4661}
4662
4663int kvm_arch_release_virq_post(int virq)
4664{
4665    MSIRouteEntry *entry, *next;
4666    QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) {
4667        if (entry->virq == virq) {
4668            trace_kvm_x86_remove_msi_route(virq);
4669            QLIST_REMOVE(entry, list);
4670            g_free(entry);
4671            break;
4672        }
4673    }
4674    return 0;
4675}
4676
4677int kvm_arch_msi_data_to_gsi(uint32_t data)
4678{
4679    abort();
4680}
4681