qemu/target/i386/machine.c
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "hw/isa/isa.h"
#include "migration/cpu.h"
#include "hyperv.h"
#include "hw/i386/x86.h"
#include "kvm_i386.h"

#include "sysemu/kvm.h"
#include "sysemu/tcg.h"

#include "qemu/error-report.h"

static const VMStateDescription vmstate_segment = {
    .name = "segment",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(selector, SegmentCache),
        VMSTATE_UINTTL(base, SegmentCache),
        VMSTATE_UINT32(limit, SegmentCache),
        VMSTATE_UINT32(flags, SegmentCache),
        VMSTATE_END_OF_LIST()
    }
};

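/*
 * The type_check() term evaluates to 0 at runtime; it only exists to raise
 * a compile-time error if the named field is not actually a SegmentCache.
 */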
#define VMSTATE_SEGMENT(_field, _state) {                            \
    .name       = (stringify(_field)),                               \
    .size       = sizeof(SegmentCache),                              \
    .vmsd       = &vmstate_segment,                                  \
    .flags      = VMS_STRUCT,                                        \
    .offset     = offsetof(_state, _field)                           \
            + type_check(SegmentCache, typeof_field(_state, _field)) \
}

#define VMSTATE_SEGMENT_ARRAY(_field, _state, _n)                    \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_segment, SegmentCache)

static const VMStateDescription vmstate_xmm_reg = {
    .name = "xmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_XMM_REGS(_field, _state, _start)                         \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_xmm_reg, ZMMReg)

/* YMMH format is the same as XMM, but for bits 128-255 */
static const VMStateDescription vmstate_ymmh_reg = {
    .name = "ymmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_YMMH_REGS_VARS(_field, _state, _start, _v)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, _v,    \
                             vmstate_ymmh_reg, ZMMReg)

static const VMStateDescription vmstate_zmmh_reg = {
    .name = "zmmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_ZMMH_REGS_VARS(_field, _state, _start)                   \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_zmmh_reg, ZMMReg)

#ifdef TARGET_X86_64
static const VMStateDescription vmstate_hi16_zmm_reg = {
    .name = "hi16_zmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_Hi16_ZMM_REGS_VARS(_field, _state, _start)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_hi16_zmm_reg, ZMMReg)
#endif

static const VMStateDescription vmstate_bnd_regs = {
    .name = "bnd_regs",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(lb, BNDReg),
        VMSTATE_UINT64(ub, BNDReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_BND_REGS(_field, _state, _n)          \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_bnd_regs, BNDReg)

static const VMStateDescription vmstate_mtrr_var = {
    .name = "mtrr_var",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, MTRRVar),
        VMSTATE_UINT64(mask, MTRRVar),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_MTRR_VARS(_field, _state, _n, _v)                    \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar)

typedef struct x86_FPReg_tmp {
    FPReg *parent;
    uint64_t tmp_mant;
    uint16_t tmp_exp;
} x86_FPReg_tmp;

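/*
 * floatx80 values are migrated as a 64-bit mantissa plus a 16-bit
 * sign/exponent word; CPU_LDoubleU is a union giving access to both
 * views of the 80-bit value.
 */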
static void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
{
    CPU_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

static floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}

static int fpreg_pre_save(void *opaque)
{
    x86_FPReg_tmp *tmp = opaque;

    /* We save the real CPU data (in the MMX case only 'mant'
       contains the MMX register) */
    cpu_get_fp80(&tmp->tmp_mant, &tmp->tmp_exp, tmp->parent->d);

    return 0;
}

static int fpreg_post_load(void *opaque, int version)
{
    x86_FPReg_tmp *tmp = opaque;

    tmp->parent->d = cpu_set_fp80(tmp->tmp_mant, tmp->tmp_exp);
    return 0;
}

static const VMStateDescription vmstate_fpreg_tmp = {
    .name = "fpreg_tmp",
    .post_load = fpreg_post_load,
    .pre_save  = fpreg_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(tmp_mant, x86_FPReg_tmp),
        VMSTATE_UINT16(tmp_exp, x86_FPReg_tmp),
        VMSTATE_END_OF_LIST()
    }
};

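/*
 * VMSTATE_WITH_TMP wraps each FPReg in a temporary x86_FPReg_tmp during
 * save/load, so the hooks above can convert between the in-memory
 * floatx80 and the mantissa/exponent pair used on the wire.
 */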
static const VMStateDescription vmstate_fpreg = {
    .name = "fpreg",
    .fields = (VMStateField[]) {
        VMSTATE_WITH_TMP(FPReg, x86_FPReg_tmp, vmstate_fpreg_tmp),
        VMSTATE_END_OF_LIST()
    }
};

static int cpu_pre_save(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    /* FPU */
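    /*
     * Fold the top-of-stack pointer into bits 11-13 of the saved status
     * word and pack the eight FPU tags into one byte, with a set bit
     * meaning "valid" (env->fptags[] uses the opposite sense: 1 means
     * empty), as in the abridged FXSAVE tag format.
     */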
    env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    env->fptag_vmstate = 0;
    for (i = 0; i < 8; i++) {
        env->fptag_vmstate |= ((!env->fptags[i]) << i);
    }

    env->fpregs_format_vmstate = 0;

    /*
     * Real mode guest segment registers should have a DPL of zero.
     * Older KVM versions set it incorrectly.
     * Fixing it allows live migration to a host with unrestricted guest
     * support (otherwise the migration fails with an invalid guest state
     * error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

#ifdef CONFIG_KVM
    /*
     * If the vCPU may have enabled VMX, we need to make sure the kernel
     * has the capabilities required to perform migration correctly:
     *
     * 1) We must be able to extract the vCPU's nested state from KVM.
     *
     * 2) If the vCPU is running in guest mode and has a pending exception,
     * we must be able to determine whether it is in a pending or injected
     * state.  Note that if KVM lacks the required capability, a
     * pending/injected exception always appears as an injected exception.
     */
    if (kvm_enabled() && cpu_vmx_maybe_enabled(env) &&
        (!env->nested_state ||
         (!kvm_has_exception_payload() && (env->hflags & HF_GUEST_MASK) &&
          env->exception_injected))) {
        error_report("Guest may have enabled nested virtualization but the "
                "kernel does not support the capabilities required to save "
                "the vCPU's nested state");
        return -EINVAL;
    }
#endif

    /*
     * When the vCPU is running L2 and an exception is still pending, it
     * can potentially be intercepted by the L1 hypervisor, in contrast to
     * an injected exception, which can no longer be intercepted.
     *
     * Furthermore, when an L2 exception is intercepted by the L1
     * hypervisor, its exception payload (CR2/DR6 on #PF/#DB)
     * should not yet be set in the respective vCPU register.
     * Thus, if an exception is pending, it is important to save
     * the exception payload separately.
     *
     * Conversely, if the exception is not pending or the vCPU is not in
     * guest mode, there is no need to distinguish between a pending and
     * an injected exception, and no need to store the exception payload
     * separately.
     *
     * To preserve better backwards-compatible migration, convert a
     * pending exception to an injected one whenever the distinction does
     * not matter, as described above.
     */
    if (env->exception_pending && !(env->hflags & HF_GUEST_MASK)) {
        env->exception_pending = 0;
        env->exception_injected = 1;

        if (env->exception_has_payload) {
            if (env->exception_nr == EXCP01_DB) {
                env->dr[6] = env->exception_payload;
            } else if (env->exception_nr == EXCP0E_PAGE) {
                env->cr[2] = env->exception_payload;
            }
        }
    }

    return 0;
}

static int cpu_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int i;

    if (env->tsc_khz && env->user_tsc_khz &&
        env->tsc_khz != env->user_tsc_khz) {
        error_report("Mismatch between user-specified TSC frequency and "
                     "migrated TSC frequency");
        return -EINVAL;
    }

    if (env->fpregs_format_vmstate) {
        error_report("Unsupported old non-softfloat CPU state");
        return -EINVAL;
    }
    /*
     * Real mode guest segment registers should have a DPL of zero.
     * Older KVM versions set it incorrectly.
     * Fixing it allows live migration from a host without unrestricted
     * guest support to a host with it (otherwise the migration fails
     * with an invalid guest state error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

    /* Older versions of QEMU incorrectly used CS.DPL as the CPL when
     * running under KVM.  This is wrong for conforming code segments.
     * Luckily, in our implementation the CPL field of hflags is redundant
     * and we can get the right value from the SS descriptor privilege level.
     */
    env->hflags &= ~HF_CPL_MASK;
    env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;

#ifdef CONFIG_KVM
    if ((env->hflags & HF_GUEST_MASK) &&
        (!env->nested_state ||
        !(env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE))) {
        error_report("vCPU set in guest-mode inconsistent with "
                     "migrated kernel nested state");
        return -EINVAL;
    }
#endif

    /*
     * There are cases where we can end up with a valid exception_nr while
     * both exception_pending and exception_injected are cleared.
     * This can happen in one of the following scenarios:
     * 1) Source is an older QEMU without KVM_CAP_EXCEPTION_PAYLOAD support.
     * 2) Source is running on a kernel without KVM_CAP_EXCEPTION_PAYLOAD
     *    support.
     * 3) The "cpu/exception_info" subsection was not sent because there was
     *    no pending exception or the guest wasn't running L2 (see the
     *    comment in cpu_pre_save()).
     *
     * In those cases, we can deduce that a valid exception_nr means
     * the exception can be treated as already injected.
     */
    if ((env->exception_nr != -1) &&
        !env->exception_pending && !env->exception_injected) {
        env->exception_injected = 1;
    }

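    /*
     * Undo cpu_pre_save(): extract the top-of-stack pointer from the
     * saved status word and unpack the FXSAVE-style tag byte back into
     * env->fptags[] (the XOR restores the inverted sense).
     */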
    env->fpstt = (env->fpus_vmstate >> 11) & 7;
    env->fpus = env->fpus_vmstate & ~0x3800;
    env->fptag_vmstate ^= 0xff;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = (env->fptag_vmstate >> i) & 1;
    }
    if (tcg_enabled()) {
        target_ulong dr7;
        update_fp_status(env);
        update_mxcsr_status(env);

        cpu_breakpoint_remove_all(cs, BP_CPU);
        cpu_watchpoint_remove_all(cs, BP_CPU);

        /* Indicate all breakpoints disabled, as they are, then
           let the helper re-enable them.  */
        dr7 = env->dr[7];
        env->dr[7] = dr7 & ~(DR7_GLOBAL_BP_MASK | DR7_LOCAL_BP_MASK);
        cpu_x86_update_dr7(env, dr7);
    }
    tlb_flush(cs);
    return 0;
}

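/*
 * Each of the subsections below has a .needed callback; the subsection is
 * written to the migration stream only when that callback returns true.
 * This keeps streams produced by newer QEMU loadable by older versions
 * whenever the optional state is still at its default value.
 */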
static bool async_pf_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.async_pf_en_msr != 0;
}

static bool pv_eoi_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.pv_eoi_en_msr != 0;
}

static bool steal_time_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.steal_time_msr != 0;
}

static bool exception_info_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    /*
     * It is important to save exception info only when we need to
     * distinguish between a pending and an injected exception, which is
     * only the case when there is a pending exception and the vCPU is
     * running L2.
     * For more info, refer to the comment in cpu_pre_save().
     */
    return env->exception_pending && (env->hflags & HF_GUEST_MASK);
}

static const VMStateDescription vmstate_exception_info = {
    .name = "cpu/exception_info",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = exception_info_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(env.exception_pending, X86CPU),
        VMSTATE_UINT8(env.exception_injected, X86CPU),
        VMSTATE_UINT8(env.exception_has_payload, X86CPU),
        VMSTATE_UINT64(env.exception_payload, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

/* The poll control MSR defaults to 1 (enabled), so only migrate it when it
 * holds a non-default value. */
static bool poll_control_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.poll_control_msr != 1;
}

static const VMStateDescription vmstate_steal_time_msr = {
    .name = "cpu/steal_time_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = steal_time_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.steal_time_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_async_pf_msr = {
    .name = "cpu/async_pf_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = async_pf_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.async_pf_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_pv_eoi_msr = {
    .name = "cpu/async_pv_eoi_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pv_eoi_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.pv_eoi_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_poll_control_msr = {
    .name = "cpu/poll_control_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = poll_control_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.poll_control_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool fpop_ip_dp_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->fpop != 0 || env->fpip != 0 || env->fpdp != 0;
}

static const VMStateDescription vmstate_fpop_ip_dp = {
    .name = "cpu/fpop_ip_dp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpop_ip_dp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(env.fpop, X86CPU),
        VMSTATE_UINT64(env.fpip, X86CPU),
        VMSTATE_UINT64(env.fpdp, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tsc_adjust_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_adjust != 0;
}

static const VMStateDescription vmstate_msr_tsc_adjust = {
    .name = "cpu/msr_tsc_adjust",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_adjust_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_adjust, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool msr_smi_count_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return cpu->migrate_smi_count && env->msr_smi_count != 0;
}

static const VMStateDescription vmstate_msr_smi_count = {
    .name = "cpu/msr_smi_count",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = msr_smi_count_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_smi_count, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tscdeadline_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_deadline != 0;
}

static const VMStateDescription vmstate_msr_tscdeadline = {
    .name = "cpu/msr_tscdeadline",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tscdeadline_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_deadline, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool misc_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_misc_enable != MSR_IA32_MISC_ENABLE_DEFAULT;
}

static bool feature_control_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_feature_control != 0;
}

static const VMStateDescription vmstate_msr_ia32_misc_enable = {
    .name = "cpu/msr_ia32_misc_enable",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = misc_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_misc_enable, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_msr_ia32_feature_control = {
    .name = "cpu/msr_ia32_feature_control",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = feature_control_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmu_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_fixed_ctr_ctrl || env->msr_global_ctrl ||
        env->msr_global_status || env->msr_global_ovf_ctrl) {
        return true;
    }
    for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
        if (env->msr_fixed_counters[i]) {
            return true;
        }
    }
    for (i = 0; i < MAX_GP_COUNTERS; i++) {
        if (env->msr_gp_counters[i] || env->msr_gp_evtsel[i]) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_architectural_pmu = {
    .name = "cpu/msr_architectural_pmu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmu_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_status, X86CPU),
        VMSTATE_UINT64(env.msr_global_ovf_ctrl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_fixed_counters, X86CPU, MAX_FIXED_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_counters, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_evtsel, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_END_OF_LIST()
    }
};

static bool mpx_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < 4; i++) {
        if (env->bnd_regs[i].lb || env->bnd_regs[i].ub) {
            return true;
        }
    }

    if (env->bndcs_regs.cfgu || env->bndcs_regs.sts) {
        return true;
    }

    return !!env->msr_bndcfgs;
}

static const VMStateDescription vmstate_mpx = {
    .name = "cpu/mpx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mpx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BND_REGS(env.bnd_regs, X86CPU, 4),
        VMSTATE_UINT64(env.bndcs_regs.cfgu, X86CPU),
        VMSTATE_UINT64(env.bndcs_regs.sts, X86CPU),
        VMSTATE_UINT64(env.msr_bndcfgs, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_hypercall_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_hypercall != 0 || env->msr_hv_guest_os_id != 0;
}

static const VMStateDescription vmstate_msr_hypercall_hypercall = {
    .name = "cpu/msr_hyperv_hypercall",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_hypercall_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU),
        VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_vapic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_vapic != 0;
}

static const VMStateDescription vmstate_msr_hyperv_vapic = {
    .name = "cpu/msr_hyperv_vapic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_vapic_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_vapic, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_time_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_tsc != 0;
}

static const VMStateDescription vmstate_msr_hyperv_time = {
    .name = "cpu/msr_hyperv_time",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_time_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_tsc, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_crash_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < HV_CRASH_PARAMS; i++) {
        if (env->msr_hv_crash_params[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_crash = {
    .name = "cpu/msr_hyperv_crash",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_crash_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params, X86CPU, HV_CRASH_PARAMS),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_runtime_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    if (!hyperv_feat_enabled(cpu, HYPERV_FEAT_RUNTIME)) {
        return false;
    }

    return env->msr_hv_runtime != 0;
}

static const VMStateDescription vmstate_msr_hyperv_runtime = {
    .name = "cpu/msr_hyperv_runtime",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_runtime_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_runtime, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_synic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_hv_synic_control != 0 ||
        env->msr_hv_synic_evt_page != 0 ||
        env->msr_hv_synic_msg_page != 0) {
        return true;
    }

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
        if (env->msr_hv_synic_sint[i] != 0) {
            return true;
        }
    }

    return false;
}

static int hyperv_synic_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    hyperv_x86_synic_update(cpu);
    return 0;
}

static const VMStateDescription vmstate_msr_hyperv_synic = {
    .name = "cpu/msr_hyperv_synic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_synic_enable_needed,
    .post_load = hyperv_synic_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU, HV_SINT_COUNT),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_stimer_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_stimer_config); i++) {
        if (env->msr_hv_stimer_config[i] || env->msr_hv_stimer_count[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_stimer = {
    .name = "cpu/msr_hyperv_stimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_stimer_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config, X86CPU,
                             HV_STIMER_COUNT),
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count, X86CPU, HV_STIMER_COUNT),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_reenlightenment_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_reenlightenment_control != 0 ||
        env->msr_hv_tsc_emulation_control != 0 ||
        env->msr_hv_tsc_emulation_status != 0;
}

static const VMStateDescription vmstate_msr_hyperv_reenlightenment = {
    .name = "cpu/msr_hyperv_reenlightenment",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_reenlightenment_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_reenlightenment_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_tsc_emulation_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_tsc_emulation_status, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool avx512_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < NB_OPMASK_REGS; i++) {
        if (env->opmask_regs[i]) {
            return true;
        }
    }

    for (i = 0; i < CPU_NB_REGS; i++) {
#define ENV_XMM(reg, field) (env->xmm_regs[reg].ZMM_Q(field))
        if (ENV_XMM(i, 4) || ENV_XMM(i, 6) ||
            ENV_XMM(i, 5) || ENV_XMM(i, 7)) {
            return true;
        }
#ifdef TARGET_X86_64
        if (ENV_XMM(i+16, 0) || ENV_XMM(i+16, 1) ||
            ENV_XMM(i+16, 2) || ENV_XMM(i+16, 3) ||
            ENV_XMM(i+16, 4) || ENV_XMM(i+16, 5) ||
            ENV_XMM(i+16, 6) || ENV_XMM(i+16, 7)) {
            return true;
        }
#endif
    }

    return false;
}

static const VMStateDescription vmstate_avx512 = {
    .name = "cpu/avx512",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = avx512_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS),
        VMSTATE_ZMMH_REGS_VARS(env.xmm_regs, X86CPU, 0),
#ifdef TARGET_X86_64
        VMSTATE_Hi16_ZMM_REGS_VARS(env.xmm_regs, X86CPU, 16),
#endif
        VMSTATE_END_OF_LIST()
    }
};

static bool xss_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->xss != 0;
}

static const VMStateDescription vmstate_xss = {
    .name = "cpu/xss",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xss_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.xss, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool umwait_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->umwait != 0;
}

static const VMStateDescription vmstate_umwait = {
    .name = "cpu/umwait",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = umwait_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.umwait, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

#ifdef TARGET_X86_64
static bool pkru_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->pkru != 0;
}

static const VMStateDescription vmstate_pkru = {
    .name = "cpu/pkru",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pkru_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.pkru, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
#endif

static bool tsc_khz_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
    X86MachineClass *x86mc = X86_MACHINE_CLASS(mc);
    return env->tsc_khz && x86mc->save_tsc_khz;
}

static const VMStateDescription vmstate_tsc_khz = {
    .name = "cpu/tsc_khz",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_khz_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(env.tsc_khz, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

#ifdef CONFIG_KVM

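/*
 * KVM reports how much nested state it filled in via nested_state->size;
 * the vmcs12 and shadow vmcs12 blobs below are migrated only when that
 * size shows the corresponding region was actually populated.
 */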
static bool vmx_vmcs12_needed(void *opaque)
{
    struct kvm_nested_state *nested_state = opaque;
    return (nested_state->size >
            offsetof(struct kvm_nested_state, data.vmx[0].vmcs12));
}

static const VMStateDescription vmstate_vmx_vmcs12 = {
    .name = "cpu/kvm_nested_state/vmx/vmcs12",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmx_vmcs12_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_ARRAY(data.vmx[0].vmcs12,
                            struct kvm_nested_state,
                            KVM_STATE_NESTED_VMX_VMCS_SIZE),
        VMSTATE_END_OF_LIST()
    }
};

static bool vmx_shadow_vmcs12_needed(void *opaque)
{
    struct kvm_nested_state *nested_state = opaque;
    return (nested_state->size >
            offsetof(struct kvm_nested_state, data.vmx[0].shadow_vmcs12));
}

static const VMStateDescription vmstate_vmx_shadow_vmcs12 = {
    .name = "cpu/kvm_nested_state/vmx/shadow_vmcs12",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmx_shadow_vmcs12_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_ARRAY(data.vmx[0].shadow_vmcs12,
                            struct kvm_nested_state,
                            KVM_STATE_NESTED_VMX_VMCS_SIZE),
        VMSTATE_END_OF_LIST()
    }
};

static bool vmx_nested_state_needed(void *opaque)
{
    struct kvm_nested_state *nested_state = opaque;

    return (nested_state->format == KVM_STATE_NESTED_FORMAT_VMX &&
            nested_state->hdr.vmx.vmxon_pa != -1ull);
}

static const VMStateDescription vmstate_vmx_nested_state = {
    .name = "cpu/kvm_nested_state/vmx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmx_nested_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_U64(hdr.vmx.vmxon_pa, struct kvm_nested_state),
        VMSTATE_U64(hdr.vmx.vmcs12_pa, struct kvm_nested_state),
        VMSTATE_U16(hdr.vmx.smm.flags, struct kvm_nested_state),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_vmx_vmcs12,
        &vmstate_vmx_shadow_vmcs12,
        NULL,
    }
};

static bool nested_state_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return (env->nested_state &&
            vmx_nested_state_needed(env->nested_state));
}

static int nested_state_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    struct kvm_nested_state *nested_state = env->nested_state;
    int min_nested_state_len = offsetof(struct kvm_nested_state, data);
    int max_nested_state_len = kvm_max_nested_state_length();

    /*
     * If the kernel does not support setting nested state and we have
     * received nested state from the migration stream, we need to fail
     * the migration.
     */
    if (max_nested_state_len <= 0) {
        error_report("Received nested state when kernel cannot restore it");
        return -EINVAL;
    }

    /*
     * Verify that the size of the received nested_state struct at least
     * covers the required header and is not larger than the maximum size
     * the kernel supports.
     */
    if (nested_state->size < min_nested_state_len) {
        error_report("Received nested state size less than min: "
                     "len=%d, min=%d",
                     nested_state->size, min_nested_state_len);
        return -EINVAL;
    }
    if (nested_state->size > max_nested_state_len) {
        error_report("Received unsupported nested state size: "
                     "nested_state->size=%d, max=%d",
                     nested_state->size, max_nested_state_len);
        return -EINVAL;
    }

    /* Verify that the format is valid */
    if ((nested_state->format != KVM_STATE_NESTED_FORMAT_VMX) &&
        (nested_state->format != KVM_STATE_NESTED_FORMAT_SVM)) {
        error_report("Received invalid nested state format: %d",
                     nested_state->format);
        return -EINVAL;
    }

    return 0;
}

static const VMStateDescription vmstate_kvm_nested_state = {
    .name = "cpu/kvm_nested_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_U16(flags, struct kvm_nested_state),
        VMSTATE_U16(format, struct kvm_nested_state),
        VMSTATE_U32(size, struct kvm_nested_state),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_vmx_nested_state,
        NULL
    }
};

static const VMStateDescription vmstate_nested_state = {
    .name = "cpu/nested_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = nested_state_needed,
    .post_load = nested_state_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_POINTER(env.nested_state, X86CPU,
                vmstate_kvm_nested_state,
                struct kvm_nested_state),
        VMSTATE_END_OF_LIST()
    }
};

#endif

static bool mcg_ext_ctl_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    return cpu->enable_lmce && env->mcg_ext_ctl;
}

static const VMStateDescription vmstate_mcg_ext_ctl = {
    .name = "cpu/mcg_ext_ctl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mcg_ext_ctl_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.mcg_ext_ctl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool spec_ctrl_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->spec_ctrl != 0;
}

static const VMStateDescription vmstate_spec_ctrl = {
    .name = "cpu/spec_ctrl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spec_ctrl_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.spec_ctrl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool intel_pt_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_rtit_ctrl || env->msr_rtit_status ||
        env->msr_rtit_output_base || env->msr_rtit_output_mask ||
        env->msr_rtit_cr3_match) {
        return true;
    }

    for (i = 0; i < MAX_RTIT_ADDRS; i++) {
        if (env->msr_rtit_addrs[i]) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_intel_pt = {
    .name = "cpu/intel_pt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = intel_pt_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_rtit_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_status, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_output_base, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_output_mask, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_cr3_match, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_rtit_addrs, X86CPU, MAX_RTIT_ADDRS),
        VMSTATE_END_OF_LIST()
    }
};

static bool virt_ssbd_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->virt_ssbd != 0;
}

static const VMStateDescription vmstate_msr_virt_ssbd = {
    .name = "cpu/virt_ssbd",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = virt_ssbd_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.virt_ssbd, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool svm_npt_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return !!(env->hflags2 & HF2_NPT_MASK);
}

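/*
 * Note: the "cpu/svn_npt" section name below is a historic typo of
 * "svm_npt".  It is kept as-is because vmstate section names are part of
 * the migration stream format, and renaming it would break compatibility.
 */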
static const VMStateDescription vmstate_svm_npt = {
    .name = "cpu/svn_npt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = svm_npt_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.nested_cr3, X86CPU),
        VMSTATE_UINT32(env.nested_pg_mode, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

#ifndef TARGET_X86_64
static bool intel_efer32_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->efer != 0;
}

static const VMStateDescription vmstate_efer32 = {
    .name = "cpu/efer32",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = intel_efer32_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.efer, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
#endif

static bool msr_tsx_ctrl_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->features[FEAT_ARCH_CAPABILITIES] & ARCH_CAP_TSX_CTRL_MSR;
}

static const VMStateDescription vmstate_msr_tsx_ctrl = {
    .name = "cpu/msr_tsx_ctrl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = msr_tsx_ctrl_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.tsx_ctrl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

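/*
 * Top-level x86 CPU vmstate.  Fields declared with a version number (the
 * _V variants) are only present in streams of at least that version;
 * all optional state lives in the subsections listed at the end.
 */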
VMStateDescription vmstate_x86_cpu = {
    .name = "cpu",
    .version_id = 12,
    .minimum_version_id = 11,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.regs, X86CPU, CPU_NB_REGS),
        VMSTATE_UINTTL(env.eip, X86CPU),
        VMSTATE_UINTTL(env.eflags, X86CPU),
        VMSTATE_UINT32(env.hflags, X86CPU),
        /* FPU */
        VMSTATE_UINT16(env.fpuc, X86CPU),
        VMSTATE_UINT16(env.fpus_vmstate, X86CPU),
        VMSTATE_UINT16(env.fptag_vmstate, X86CPU),
        VMSTATE_UINT16(env.fpregs_format_vmstate, X86CPU),

        VMSTATE_STRUCT_ARRAY(env.fpregs, X86CPU, 8, 0, vmstate_fpreg, FPReg),

        VMSTATE_SEGMENT_ARRAY(env.segs, X86CPU, 6),
        VMSTATE_SEGMENT(env.ldt, X86CPU),
        VMSTATE_SEGMENT(env.tr, X86CPU),
        VMSTATE_SEGMENT(env.gdt, X86CPU),
        VMSTATE_SEGMENT(env.idt, X86CPU),

        VMSTATE_UINT32(env.sysenter_cs, X86CPU),
        VMSTATE_UINTTL(env.sysenter_esp, X86CPU),
        VMSTATE_UINTTL(env.sysenter_eip, X86CPU),

        VMSTATE_UINTTL(env.cr[0], X86CPU),
        VMSTATE_UINTTL(env.cr[2], X86CPU),
        VMSTATE_UINTTL(env.cr[3], X86CPU),
        VMSTATE_UINTTL(env.cr[4], X86CPU),
        VMSTATE_UINTTL_ARRAY(env.dr, X86CPU, 8),
        /* MMU */
        VMSTATE_INT32(env.a20_mask, X86CPU),
        /* XMM */
        VMSTATE_UINT32(env.mxcsr, X86CPU),
        VMSTATE_XMM_REGS(env.xmm_regs, X86CPU, 0),

#ifdef TARGET_X86_64
        VMSTATE_UINT64(env.efer, X86CPU),
        VMSTATE_UINT64(env.star, X86CPU),
        VMSTATE_UINT64(env.lstar, X86CPU),
        VMSTATE_UINT64(env.cstar, X86CPU),
        VMSTATE_UINT64(env.fmask, X86CPU),
        VMSTATE_UINT64(env.kernelgsbase, X86CPU),
#endif
        VMSTATE_UINT32(env.smbase, X86CPU),

        VMSTATE_UINT64(env.pat, X86CPU),
        VMSTATE_UINT32(env.hflags2, X86CPU),

        VMSTATE_UINT64(env.vm_hsave, X86CPU),
        VMSTATE_UINT64(env.vm_vmcb, X86CPU),
        VMSTATE_UINT64(env.tsc_offset, X86CPU),
        VMSTATE_UINT64(env.intercept, X86CPU),
        VMSTATE_UINT16(env.intercept_cr_read, X86CPU),
        VMSTATE_UINT16(env.intercept_cr_write, X86CPU),
        VMSTATE_UINT16(env.intercept_dr_read, X86CPU),
        VMSTATE_UINT16(env.intercept_dr_write, X86CPU),
        VMSTATE_UINT32(env.intercept_exceptions, X86CPU),
        VMSTATE_UINT8(env.v_tpr, X86CPU),
        /* MTRRs */
        VMSTATE_UINT64_ARRAY(env.mtrr_fixed, X86CPU, 11),
        VMSTATE_UINT64(env.mtrr_deftype, X86CPU),
        VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8),
        /* KVM-related states */
        VMSTATE_INT32(env.interrupt_injected, X86CPU),
        VMSTATE_UINT32(env.mp_state, X86CPU),
        VMSTATE_UINT64(env.tsc, X86CPU),
        VMSTATE_INT32(env.exception_nr, X86CPU),
        VMSTATE_UINT8(env.soft_interrupt, X86CPU),
        VMSTATE_UINT8(env.nmi_injected, X86CPU),
        VMSTATE_UINT8(env.nmi_pending, X86CPU),
        VMSTATE_UINT8(env.has_error_code, X86CPU),
        VMSTATE_UINT32(env.sipi_vector, X86CPU),
        /* MCE */
        VMSTATE_UINT64(env.mcg_cap, X86CPU),
        VMSTATE_UINT64(env.mcg_status, X86CPU),
        VMSTATE_UINT64(env.mcg_ctl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.mce_banks, X86CPU, MCE_BANKS_DEF * 4),
        /* rdtscp */
        VMSTATE_UINT64(env.tsc_aux, X86CPU),
        /* KVM pvclock msr */
        VMSTATE_UINT64(env.system_time_msr, X86CPU),
        VMSTATE_UINT64(env.wall_clock_msr, X86CPU),
        /* XSAVE related fields */
        VMSTATE_UINT64_V(env.xcr0, X86CPU, 12),
        VMSTATE_UINT64_V(env.xstate_bv, X86CPU, 12),
        VMSTATE_YMMH_REGS_VARS(env.xmm_regs, X86CPU, 0, 12),
        VMSTATE_END_OF_LIST()
        /* The above list is not sorted w.r.t. version numbers, watch out! */
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_exception_info,
        &vmstate_async_pf_msr,
        &vmstate_pv_eoi_msr,
        &vmstate_steal_time_msr,
        &vmstate_poll_control_msr,
        &vmstate_fpop_ip_dp,
        &vmstate_msr_tsc_adjust,
        &vmstate_msr_tscdeadline,
        &vmstate_msr_ia32_misc_enable,
        &vmstate_msr_ia32_feature_control,
        &vmstate_msr_architectural_pmu,
        &vmstate_mpx,
        &vmstate_msr_hypercall_hypercall,
        &vmstate_msr_hyperv_vapic,
        &vmstate_msr_hyperv_time,
        &vmstate_msr_hyperv_crash,
        &vmstate_msr_hyperv_runtime,
        &vmstate_msr_hyperv_synic,
        &vmstate_msr_hyperv_stimer,
        &vmstate_msr_hyperv_reenlightenment,
        &vmstate_avx512,
        &vmstate_xss,
        &vmstate_umwait,
        &vmstate_tsc_khz,
        &vmstate_msr_smi_count,
#ifdef TARGET_X86_64
        &vmstate_pkru,
#endif
        &vmstate_spec_ctrl,
        &vmstate_mcg_ext_ctl,
        &vmstate_msr_intel_pt,
        &vmstate_msr_virt_ssbd,
        &vmstate_svm_npt,
#ifndef TARGET_X86_64
        &vmstate_efer32,
#endif
#ifdef CONFIG_KVM
        &vmstate_nested_state,
#endif
        &vmstate_msr_tsx_ctrl,
        NULL
    }
};