qemu/target/i386/machine.c
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "hw/hw.h"
#include "hw/boards.h"
#include "hw/i386/pc.h"
#include "hw/isa/isa.h"
#include "migration/cpu.h"
#include "hyperv.h"
#include "kvm_i386.h"

#include "sysemu/kvm.h"
#include "sysemu/tcg.h"

#include "qemu/error-report.h"

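/*
 * VMState (migration) descriptions for the x86 CPU.  Each
 * VMStateDescription below serializes one slice of CPUX86State;
 * optional state is expressed as subsections with a .needed callback,
 * so the migration stream stays compatible with older QEMU versions.
 */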
static const VMStateDescription vmstate_segment = {
    .name = "segment",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(selector, SegmentCache),
        VMSTATE_UINTTL(base, SegmentCache),
        VMSTATE_UINT32(limit, SegmentCache),
        VMSTATE_UINT32(flags, SegmentCache),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_SEGMENT(_field, _state) {                            \
    .name       = (stringify(_field)),                               \
    .size       = sizeof(SegmentCache),                              \
    .vmsd       = &vmstate_segment,                                  \
    .flags      = VMS_STRUCT,                                        \
    .offset     = offsetof(_state, _field)                           \
            + type_check(SegmentCache,typeof_field(_state, _field))  \
}

#define VMSTATE_SEGMENT_ARRAY(_field, _state, _n)                    \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_segment, SegmentCache)

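/* XMM format covers bits 0-127 of each vector register */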
static const VMStateDescription vmstate_xmm_reg = {
    .name = "xmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_XMM_REGS(_field, _state, _start)                         \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_xmm_reg, ZMMReg)

/* YMMH format is the same as XMM, but for bits 128-255 */
static const VMStateDescription vmstate_ymmh_reg = {
    .name = "ymmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_YMMH_REGS_VARS(_field, _state, _start, _v)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, _v,    \
                             vmstate_ymmh_reg, ZMMReg)

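/* ZMMH format covers bits 256-511, the upper half of each ZMM register */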
static const VMStateDescription vmstate_zmmh_reg = {
    .name = "zmmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_ZMMH_REGS_VARS(_field, _state, _start)                   \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_zmmh_reg, ZMMReg)

#ifdef TARGET_X86_64
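/* Hi16_ZMM format holds all 512 bits of ZMM16-ZMM31, which exist only
   in 64-bit mode */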
static const VMStateDescription vmstate_hi16_zmm_reg = {
    .name = "hi16_zmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_Hi16_ZMM_REGS_VARS(_field, _state, _start)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_hi16_zmm_reg, ZMMReg)
#endif

static const VMStateDescription vmstate_bnd_regs = {
    .name = "bnd_regs",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(lb, BNDReg),
        VMSTATE_UINT64(ub, BNDReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_BND_REGS(_field, _state, _n)          \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_bnd_regs, BNDReg)

static const VMStateDescription vmstate_mtrr_var = {
    .name = "mtrr_var",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, MTRRVar),
        VMSTATE_UINT64(mask, MTRRVar),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_MTRR_VARS(_field, _state, _n, _v)                    \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar)

typedef struct x86_FPReg_tmp {
    FPReg *parent;
    uint64_t tmp_mant;
    uint16_t tmp_exp;
} x86_FPReg_tmp;

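/*
 * Split a floatx80 into a 64-bit mantissa and a 16-bit sign+exponent
 * word, so the FPU registers can be migrated in a fixed wire format.
 */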
static void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
{
    CPU_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

static floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}

static int fpreg_pre_save(void *opaque)
{
    x86_FPReg_tmp *tmp = opaque;

    /* we save the real CPU data (in case of MMX usage, only 'mant'
       contains the MMX register) */
    cpu_get_fp80(&tmp->tmp_mant, &tmp->tmp_exp, tmp->parent->d);

    return 0;
}

static int fpreg_post_load(void *opaque, int version)
{
    x86_FPReg_tmp *tmp = opaque;

    tmp->parent->d = cpu_set_fp80(tmp->tmp_mant, tmp->tmp_exp);
    return 0;
}

static const VMStateDescription vmstate_fpreg_tmp = {
    .name = "fpreg_tmp",
    .post_load = fpreg_post_load,
    .pre_save  = fpreg_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(tmp_mant, x86_FPReg_tmp),
        VMSTATE_UINT16(tmp_exp, x86_FPReg_tmp),
        VMSTATE_END_OF_LIST()
    }
};

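/*
 * VMSTATE_WITH_TMP marshals each FPReg through a temporary
 * x86_FPReg_tmp, whose pre_save/post_load hooks convert between the
 * in-memory floatx80 and the mantissa/exponent pair on the wire.
 */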
static const VMStateDescription vmstate_fpreg = {
    .name = "fpreg",
    .fields = (VMStateField[]) {
        VMSTATE_WITH_TMP(FPReg, x86_FPReg_tmp, vmstate_fpreg_tmp),
        VMSTATE_END_OF_LIST()
    }
};

static int cpu_pre_save(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    /* FPU */
    env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    env->fptag_vmstate = 0;
    for (i = 0; i < 8; i++) {
        env->fptag_vmstate |= ((!env->fptags[i]) << i);
    }

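    /* 0 selects the modern softfloat register layout; cpu_post_load()
       rejects any other value */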
    env->fpregs_format_vmstate = 0;

    /*
     * Real mode guest segment registers' DPL should be zero.
     * Older KVM versions were setting it wrongly.
     * Fixing it will allow live migration to hosts with unrestricted
     * guest support (otherwise the migration will fail with an invalid
     * guest state error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

#ifdef CONFIG_KVM
    /*
     * In case the vCPU may have enabled VMX, we need to make sure the
     * kernel has the required capabilities in order to perform migration
     * correctly:
     *
     * 1) We must be able to extract the vCPU nested-state from KVM.
     *
     * 2) In case the vCPU is running in guest-mode and it has a pending
     * exception, we must be able to determine if it's in a pending or
     * injected state.  Note that in case KVM doesn't have the required
     * capability to do so, a pending/injected exception will always
     * appear as an injected exception.
     */
    if (kvm_enabled() && cpu_vmx_maybe_enabled(env) &&
        (!env->nested_state ||
         (!kvm_has_exception_payload() && (env->hflags & HF_GUEST_MASK) &&
          env->exception_injected))) {
        error_report("Guest may have enabled nested virtualization but "
                "kernel does not support required capabilities to save "
                "vCPU nested state");
        return -EINVAL;
    }
#endif

    /*
     * When the vCPU is running L2 and an exception is still pending,
     * it can potentially be intercepted by the L1 hypervisor.
     * In contrast, an injected exception cannot be intercepted anymore.
     *
     * Furthermore, when an L2 exception is intercepted by the L1
     * hypervisor, its exception payload (CR2/DR6 on #PF/#DB)
     * should not be set yet in the respective vCPU register.
     * Thus, in case an exception is pending, it is
     * important to save the exception payload separately.
     *
     * Therefore, if an exception is not in a pending state
     * or the vCPU is not in guest-mode, it is not important to
     * distinguish between a pending and injected exception,
     * and we don't need to store the exception payload separately.
     *
     * In order to preserve better backwards-compatible migration,
     * convert a pending exception to an injected exception in
     * case it is not important to distinguish between them
     * as described above.
     */
    if (env->exception_pending && !(env->hflags & HF_GUEST_MASK)) {
        env->exception_pending = 0;
        env->exception_injected = 1;

        if (env->exception_has_payload) {
            if (env->exception_nr == EXCP01_DB) {
                env->dr[6] = env->exception_payload;
            } else if (env->exception_nr == EXCP0E_PAGE) {
                env->cr[2] = env->exception_payload;
            }
        }
    }

    return 0;
}

static int cpu_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int i;

    if (env->tsc_khz && env->user_tsc_khz &&
        env->tsc_khz != env->user_tsc_khz) {
        error_report("Mismatch between user-specified TSC frequency and "
                     "migrated TSC frequency");
        return -EINVAL;
    }

    if (env->fpregs_format_vmstate) {
        error_report("Unsupported old non-softfloat CPU state");
        return -EINVAL;
    }
    /*
     * Real mode guest segment registers' DPL should be zero.
     * Older KVM versions were setting it wrongly.
     * Fixing it will allow live migration from hosts that don't have
     * unrestricted guest support to hosts that do
     * (otherwise the migration will fail with an invalid guest state
     * error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

    /* Older versions of QEMU incorrectly used CS.DPL as the CPL when
     * running under KVM.  This is wrong for conforming code segments.
     * Luckily, in our implementation the CPL field of hflags is redundant
     * and we can get the right value from the SS descriptor privilege level.
     */
    env->hflags &= ~HF_CPL_MASK;
    env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;

#ifdef CONFIG_KVM
    if ((env->hflags & HF_GUEST_MASK) &&
        (!env->nested_state ||
        !(env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE))) {
        error_report("vCPU set in guest-mode inconsistent with "
                     "migrated kernel nested state");
        return -EINVAL;
    }
#endif

    /*
     * There are cases where we can get a valid exception_nr with both
     * exception_pending and exception_injected being cleared.
     * This can happen in one of the following scenarios:
     * 1) Source is older QEMU without KVM_CAP_EXCEPTION_PAYLOAD support.
     * 2) Source is running on a kernel without KVM_CAP_EXCEPTION_PAYLOAD
     *    support.
     * 3) "cpu/exception_info" subsection not sent because there is no
     *    exception pending or guest wasn't running L2 (see comment in
     *    cpu_pre_save()).
     *
     * In those cases, we can just deduce that a valid exception_nr means
     * we can treat the exception as already injected.
     */
    if ((env->exception_nr != -1) &&
        !env->exception_pending && !env->exception_injected) {
        env->exception_injected = 1;
    }

    env->fpstt = (env->fpus_vmstate >> 11) & 7;
    env->fpus = env->fpus_vmstate & ~0x3800;
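    /* cpu_pre_save() stored the inverted tags (bit set = valid register),
       so flip them back before unpacking into fptags[] */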
    env->fptag_vmstate ^= 0xff;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = (env->fptag_vmstate >> i) & 1;
    }
    if (tcg_enabled()) {
        target_ulong dr7;
        update_fp_status(env);
        update_mxcsr_status(env);

        cpu_breakpoint_remove_all(cs, BP_CPU);
        cpu_watchpoint_remove_all(cs, BP_CPU);

        /* Indicate all breakpoints disabled, as they are, then
           let the helper re-enable them.  */
        dr7 = env->dr[7];
        env->dr[7] = dr7 & ~(DR7_GLOBAL_BP_MASK | DR7_LOCAL_BP_MASK);
        cpu_x86_update_dr7(env, dr7);
    }
    tlb_flush(cs);
    return 0;
}

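/*
 * The subsections below are only put on the wire when their .needed
 * callback returns true, so a stream produced by this QEMU can still
 * be accepted by older versions that don't know about them.
 */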
static bool async_pf_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.async_pf_en_msr != 0;
}

static bool pv_eoi_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.pv_eoi_en_msr != 0;
}

static bool steal_time_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.steal_time_msr != 0;
}

static bool exception_info_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    /*
     * It is important to save exception-info only in case
     * we need to distinguish between a pending and injected
     * exception, which is only required in case there is a
     * pending exception and the vCPU is running L2.
     * For more info, refer to the comment in cpu_pre_save().
     */
    return env->exception_pending && (env->hflags & HF_GUEST_MASK);
}

static const VMStateDescription vmstate_exception_info = {
    .name = "cpu/exception_info",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = exception_info_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(env.exception_pending, X86CPU),
        VMSTATE_UINT8(env.exception_injected, X86CPU),
        VMSTATE_UINT8(env.exception_has_payload, X86CPU),
        VMSTATE_UINT64(env.exception_payload, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_steal_time_msr = {
    .name = "cpu/steal_time_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = steal_time_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.steal_time_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_async_pf_msr = {
    .name = "cpu/async_pf_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = async_pf_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.async_pf_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_pv_eoi_msr = {
    .name = "cpu/async_pv_eoi_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pv_eoi_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.pv_eoi_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool fpop_ip_dp_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->fpop != 0 || env->fpip != 0 || env->fpdp != 0;
}

static const VMStateDescription vmstate_fpop_ip_dp = {
    .name = "cpu/fpop_ip_dp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpop_ip_dp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(env.fpop, X86CPU),
        VMSTATE_UINT64(env.fpip, X86CPU),
        VMSTATE_UINT64(env.fpdp, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tsc_adjust_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_adjust != 0;
}

static const VMStateDescription vmstate_msr_tsc_adjust = {
    .name = "cpu/msr_tsc_adjust",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_adjust_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_adjust, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool msr_smi_count_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return cpu->migrate_smi_count && env->msr_smi_count != 0;
}

static const VMStateDescription vmstate_msr_smi_count = {
    .name = "cpu/msr_smi_count",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = msr_smi_count_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_smi_count, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tscdeadline_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_deadline != 0;
}

static const VMStateDescription vmstate_msr_tscdeadline = {
    .name = "cpu/msr_tscdeadline",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tscdeadline_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_deadline, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool misc_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_misc_enable != MSR_IA32_MISC_ENABLE_DEFAULT;
}

static bool feature_control_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_feature_control != 0;
}

static const VMStateDescription vmstate_msr_ia32_misc_enable = {
    .name = "cpu/msr_ia32_misc_enable",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = misc_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_misc_enable, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_msr_ia32_feature_control = {
    .name = "cpu/msr_ia32_feature_control",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = feature_control_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmu_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_fixed_ctr_ctrl || env->msr_global_ctrl ||
        env->msr_global_status || env->msr_global_ovf_ctrl) {
        return true;
    }
    for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
        if (env->msr_fixed_counters[i]) {
            return true;
        }
    }
    for (i = 0; i < MAX_GP_COUNTERS; i++) {
        if (env->msr_gp_counters[i] || env->msr_gp_evtsel[i]) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_architectural_pmu = {
    .name = "cpu/msr_architectural_pmu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmu_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_status, X86CPU),
        VMSTATE_UINT64(env.msr_global_ovf_ctrl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_fixed_counters, X86CPU, MAX_FIXED_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_counters, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_evtsel, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_END_OF_LIST()
    }
};

static bool mpx_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < 4; i++) {
        if (env->bnd_regs[i].lb || env->bnd_regs[i].ub) {
            return true;
        }
    }

    if (env->bndcs_regs.cfgu || env->bndcs_regs.sts) {
        return true;
    }

    return !!env->msr_bndcfgs;
}

static const VMStateDescription vmstate_mpx = {
    .name = "cpu/mpx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mpx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BND_REGS(env.bnd_regs, X86CPU, 4),
        VMSTATE_UINT64(env.bndcs_regs.cfgu, X86CPU),
        VMSTATE_UINT64(env.bndcs_regs.sts, X86CPU),
        VMSTATE_UINT64(env.msr_bndcfgs, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_hypercall_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_hypercall != 0 || env->msr_hv_guest_os_id != 0;
}

static const VMStateDescription vmstate_msr_hypercall_hypercall = {
    .name = "cpu/msr_hyperv_hypercall",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_hypercall_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU),
        VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_vapic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_vapic != 0;
}

static const VMStateDescription vmstate_msr_hyperv_vapic = {
    .name = "cpu/msr_hyperv_vapic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_vapic_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_vapic, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_time_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_tsc != 0;
}

static const VMStateDescription vmstate_msr_hyperv_time = {
    .name = "cpu/msr_hyperv_time",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_time_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_tsc, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_crash_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < HV_CRASH_PARAMS; i++) {
        if (env->msr_hv_crash_params[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_crash = {
    .name = "cpu/msr_hyperv_crash",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_crash_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params, X86CPU, HV_CRASH_PARAMS),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_runtime_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    if (!hyperv_feat_enabled(cpu, HYPERV_FEAT_RUNTIME)) {
        return false;
    }

    return env->msr_hv_runtime != 0;
}

static const VMStateDescription vmstate_msr_hyperv_runtime = {
    .name = "cpu/msr_hyperv_runtime",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_runtime_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_runtime, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_synic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_hv_synic_control != 0 ||
        env->msr_hv_synic_evt_page != 0 ||
        env->msr_hv_synic_msg_page != 0) {
        return true;
    }

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
        if (env->msr_hv_synic_sint[i] != 0) {
            return true;
        }
    }

    return false;
}

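/* Re-evaluate the SynIC state on the destination once its MSRs have
   been restored */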
static int hyperv_synic_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    hyperv_x86_synic_update(cpu);
    return 0;
}

static const VMStateDescription vmstate_msr_hyperv_synic = {
    .name = "cpu/msr_hyperv_synic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_synic_enable_needed,
    .post_load = hyperv_synic_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU, HV_SINT_COUNT),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_stimer_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_stimer_config); i++) {
        if (env->msr_hv_stimer_config[i] || env->msr_hv_stimer_count[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_stimer = {
    .name = "cpu/msr_hyperv_stimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_stimer_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config, X86CPU,
                             HV_STIMER_COUNT),
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count, X86CPU, HV_STIMER_COUNT),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_reenlightenment_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_reenlightenment_control != 0 ||
        env->msr_hv_tsc_emulation_control != 0 ||
        env->msr_hv_tsc_emulation_status != 0;
}

static const VMStateDescription vmstate_msr_hyperv_reenlightenment = {
    .name = "cpu/msr_hyperv_reenlightenment",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_reenlightenment_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_reenlightenment_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_tsc_emulation_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_tsc_emulation_status, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

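/* AVX512 state is only sent when some opmask register or upper-ZMM
   bits are non-zero */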
static bool avx512_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < NB_OPMASK_REGS; i++) {
        if (env->opmask_regs[i]) {
            return true;
        }
    }

    for (i = 0; i < CPU_NB_REGS; i++) {
#define ENV_XMM(reg, field) (env->xmm_regs[reg].ZMM_Q(field))
        if (ENV_XMM(i, 4) || ENV_XMM(i, 6) ||
            ENV_XMM(i, 5) || ENV_XMM(i, 7)) {
            return true;
        }
#ifdef TARGET_X86_64
        if (ENV_XMM(i+16, 0) || ENV_XMM(i+16, 1) ||
            ENV_XMM(i+16, 2) || ENV_XMM(i+16, 3) ||
            ENV_XMM(i+16, 4) || ENV_XMM(i+16, 5) ||
            ENV_XMM(i+16, 6) || ENV_XMM(i+16, 7)) {
            return true;
        }
#endif
    }

    return false;
}

static const VMStateDescription vmstate_avx512 = {
    .name = "cpu/avx512",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = avx512_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS),
        VMSTATE_ZMMH_REGS_VARS(env.xmm_regs, X86CPU, 0),
#ifdef TARGET_X86_64
        VMSTATE_Hi16_ZMM_REGS_VARS(env.xmm_regs, X86CPU, 16),
#endif
        VMSTATE_END_OF_LIST()
    }
};

static bool xss_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->xss != 0;
}

static const VMStateDescription vmstate_xss = {
    .name = "cpu/xss",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xss_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.xss, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

#ifdef TARGET_X86_64
static bool pkru_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->pkru != 0;
}

static const VMStateDescription vmstate_pkru = {
    .name = "cpu/pkru",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pkru_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT32(env.pkru, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
#endif

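/* tsc_khz is only migrated for machine types whose PCMachineClass
   enables save_tsc_khz */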
static bool tsc_khz_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
    PCMachineClass *pcmc = PC_MACHINE_CLASS(mc);
    return env->tsc_khz && pcmc->save_tsc_khz;
}

static const VMStateDescription vmstate_tsc_khz = {
    .name = "cpu/tsc_khz",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_khz_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(env.tsc_khz, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

#ifdef CONFIG_KVM

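/*
 * The VMCS12 blobs are only present when the kernel-reported nested
 * state is large enough to contain them.
 */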
static bool vmx_vmcs12_needed(void *opaque)
{
    struct kvm_nested_state *nested_state = opaque;
    return (nested_state->size >
            offsetof(struct kvm_nested_state, data.vmx[0].vmcs12));
}

static const VMStateDescription vmstate_vmx_vmcs12 = {
    .name = "cpu/kvm_nested_state/vmx/vmcs12",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmx_vmcs12_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_ARRAY(data.vmx[0].vmcs12,
                            struct kvm_nested_state,
                            KVM_STATE_NESTED_VMX_VMCS_SIZE),
        VMSTATE_END_OF_LIST()
    }
};

static bool vmx_shadow_vmcs12_needed(void *opaque)
{
    struct kvm_nested_state *nested_state = opaque;
    return (nested_state->size >
            offsetof(struct kvm_nested_state, data.vmx[0].shadow_vmcs12));
}

static const VMStateDescription vmstate_vmx_shadow_vmcs12 = {
    .name = "cpu/kvm_nested_state/vmx/shadow_vmcs12",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmx_shadow_vmcs12_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_ARRAY(data.vmx[0].shadow_vmcs12,
                            struct kvm_nested_state,
                            KVM_STATE_NESTED_VMX_VMCS_SIZE),
        VMSTATE_END_OF_LIST()
    }
};

static bool vmx_nested_state_needed(void *opaque)
{
    struct kvm_nested_state *nested_state = opaque;

    return (nested_state->format == KVM_STATE_NESTED_FORMAT_VMX &&
            nested_state->hdr.vmx.vmxon_pa != -1ull);
}

static const VMStateDescription vmstate_vmx_nested_state = {
    .name = "cpu/kvm_nested_state/vmx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmx_nested_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_U64(hdr.vmx.vmxon_pa, struct kvm_nested_state),
        VMSTATE_U64(hdr.vmx.vmcs12_pa, struct kvm_nested_state),
        VMSTATE_U16(hdr.vmx.smm.flags, struct kvm_nested_state),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_vmx_vmcs12,
        &vmstate_vmx_shadow_vmcs12,
        NULL,
    }
};

static bool nested_state_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return (env->nested_state &&
            vmx_nested_state_needed(env->nested_state));
}

static int nested_state_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    struct kvm_nested_state *nested_state = env->nested_state;
    int min_nested_state_len = offsetof(struct kvm_nested_state, data);
    int max_nested_state_len = kvm_max_nested_state_length();

    /*
     * If our kernel doesn't support setting the nested state
     * while we have received it from the migration stream,
     * we need to fail the migration.
     */
    if (max_nested_state_len <= 0) {
        error_report("Received nested state when kernel cannot restore it");
        return -EINVAL;
    }

    /*
     * Verify that the size of the received nested_state struct
     * at least covers the required header and is not larger
     * than the max size that our kernel supports.
     */
    if (nested_state->size < min_nested_state_len) {
        error_report("Received nested state size less than min: "
                     "len=%d, min=%d",
                     nested_state->size, min_nested_state_len);
        return -EINVAL;
    }
    if (nested_state->size > max_nested_state_len) {
        error_report("Received unsupported nested state size: "
                     "nested_state->size=%d, max=%d",
                     nested_state->size, max_nested_state_len);
        return -EINVAL;
    }

    /* Verify format is valid */
    if ((nested_state->format != KVM_STATE_NESTED_FORMAT_VMX) &&
        (nested_state->format != KVM_STATE_NESTED_FORMAT_SVM)) {
        error_report("Received invalid nested state format: %d",
                     nested_state->format);
        return -EINVAL;
    }

    return 0;
}

static const VMStateDescription vmstate_kvm_nested_state = {
    .name = "cpu/kvm_nested_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_U16(flags, struct kvm_nested_state),
        VMSTATE_U16(format, struct kvm_nested_state),
        VMSTATE_U32(size, struct kvm_nested_state),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_vmx_nested_state,
        NULL
    }
};

static const VMStateDescription vmstate_nested_state = {
    .name = "cpu/nested_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = nested_state_needed,
    .post_load = nested_state_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_POINTER(env.nested_state, X86CPU,
                vmstate_kvm_nested_state,
                struct kvm_nested_state),
        VMSTATE_END_OF_LIST()
    }
};

#endif

static bool mcg_ext_ctl_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    return cpu->enable_lmce && env->mcg_ext_ctl;
}

static const VMStateDescription vmstate_mcg_ext_ctl = {
    .name = "cpu/mcg_ext_ctl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mcg_ext_ctl_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.mcg_ext_ctl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool spec_ctrl_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->spec_ctrl != 0;
}

static const VMStateDescription vmstate_spec_ctrl = {
    .name = "cpu/spec_ctrl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spec_ctrl_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT64(env.spec_ctrl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool intel_pt_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_rtit_ctrl || env->msr_rtit_status ||
        env->msr_rtit_output_base || env->msr_rtit_output_mask ||
        env->msr_rtit_cr3_match) {
        return true;
    }

    for (i = 0; i < MAX_RTIT_ADDRS; i++) {
        if (env->msr_rtit_addrs[i]) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_intel_pt = {
    .name = "cpu/intel_pt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = intel_pt_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_rtit_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_status, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_output_base, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_output_mask, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_cr3_match, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_rtit_addrs, X86CPU, MAX_RTIT_ADDRS),
        VMSTATE_END_OF_LIST()
    }
};

static bool virt_ssbd_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->virt_ssbd != 0;
}

static const VMStateDescription vmstate_msr_virt_ssbd = {
    .name = "cpu/virt_ssbd",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = virt_ssbd_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT64(env.virt_ssbd, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool svm_npt_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return !!(env->hflags2 & HF2_NPT_MASK);
}

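/*
 * Note: "svn_npt" in the section name below looks like a typo for
 * "svm_npt", but the name is part of the migration stream format, so
 * it has to stay as-is for compatibility.
 */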
static const VMStateDescription vmstate_svm_npt = {
    .name = "cpu/svn_npt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = svm_npt_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT64(env.nested_cr3, X86CPU),
        VMSTATE_UINT32(env.nested_pg_mode, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

#ifndef TARGET_X86_64
static bool intel_efer32_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->efer != 0;
}

static const VMStateDescription vmstate_efer32 = {
    .name = "cpu/efer32",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = intel_efer32_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.efer, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
#endif

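/*
 * The top-level x86 CPU state.  Everything registered in .fields is
 * always sent; the optional pieces above hang off .subsections and are
 * gated by their .needed callbacks.
 */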
VMStateDescription vmstate_x86_cpu = {
    .name = "cpu",
    .version_id = 12,
    .minimum_version_id = 11,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.regs, X86CPU, CPU_NB_REGS),
        VMSTATE_UINTTL(env.eip, X86CPU),
        VMSTATE_UINTTL(env.eflags, X86CPU),
        VMSTATE_UINT32(env.hflags, X86CPU),
        /* FPU */
        VMSTATE_UINT16(env.fpuc, X86CPU),
        VMSTATE_UINT16(env.fpus_vmstate, X86CPU),
        VMSTATE_UINT16(env.fptag_vmstate, X86CPU),
        VMSTATE_UINT16(env.fpregs_format_vmstate, X86CPU),

        VMSTATE_STRUCT_ARRAY(env.fpregs, X86CPU, 8, 0, vmstate_fpreg, FPReg),

        VMSTATE_SEGMENT_ARRAY(env.segs, X86CPU, 6),
        VMSTATE_SEGMENT(env.ldt, X86CPU),
        VMSTATE_SEGMENT(env.tr, X86CPU),
        VMSTATE_SEGMENT(env.gdt, X86CPU),
        VMSTATE_SEGMENT(env.idt, X86CPU),

        VMSTATE_UINT32(env.sysenter_cs, X86CPU),
        VMSTATE_UINTTL(env.sysenter_esp, X86CPU),
        VMSTATE_UINTTL(env.sysenter_eip, X86CPU),

        VMSTATE_UINTTL(env.cr[0], X86CPU),
        VMSTATE_UINTTL(env.cr[2], X86CPU),
        VMSTATE_UINTTL(env.cr[3], X86CPU),
        VMSTATE_UINTTL(env.cr[4], X86CPU),
        VMSTATE_UINTTL_ARRAY(env.dr, X86CPU, 8),
        /* MMU */
        VMSTATE_INT32(env.a20_mask, X86CPU),
        /* XMM */
        VMSTATE_UINT32(env.mxcsr, X86CPU),
        VMSTATE_XMM_REGS(env.xmm_regs, X86CPU, 0),

#ifdef TARGET_X86_64
        VMSTATE_UINT64(env.efer, X86CPU),
        VMSTATE_UINT64(env.star, X86CPU),
        VMSTATE_UINT64(env.lstar, X86CPU),
        VMSTATE_UINT64(env.cstar, X86CPU),
        VMSTATE_UINT64(env.fmask, X86CPU),
        VMSTATE_UINT64(env.kernelgsbase, X86CPU),
#endif
        VMSTATE_UINT32(env.smbase, X86CPU),

        VMSTATE_UINT64(env.pat, X86CPU),
        VMSTATE_UINT32(env.hflags2, X86CPU),

        VMSTATE_UINT64(env.vm_hsave, X86CPU),
        VMSTATE_UINT64(env.vm_vmcb, X86CPU),
        VMSTATE_UINT64(env.tsc_offset, X86CPU),
        VMSTATE_UINT64(env.intercept, X86CPU),
        VMSTATE_UINT16(env.intercept_cr_read, X86CPU),
        VMSTATE_UINT16(env.intercept_cr_write, X86CPU),
        VMSTATE_UINT16(env.intercept_dr_read, X86CPU),
        VMSTATE_UINT16(env.intercept_dr_write, X86CPU),
        VMSTATE_UINT32(env.intercept_exceptions, X86CPU),
        VMSTATE_UINT8(env.v_tpr, X86CPU),
        /* MTRRs */
        VMSTATE_UINT64_ARRAY(env.mtrr_fixed, X86CPU, 11),
        VMSTATE_UINT64(env.mtrr_deftype, X86CPU),
        VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8),
        /* KVM-related states */
        VMSTATE_INT32(env.interrupt_injected, X86CPU),
        VMSTATE_UINT32(env.mp_state, X86CPU),
        VMSTATE_UINT64(env.tsc, X86CPU),
        VMSTATE_INT32(env.exception_nr, X86CPU),
        VMSTATE_UINT8(env.soft_interrupt, X86CPU),
        VMSTATE_UINT8(env.nmi_injected, X86CPU),
        VMSTATE_UINT8(env.nmi_pending, X86CPU),
        VMSTATE_UINT8(env.has_error_code, X86CPU),
        VMSTATE_UINT32(env.sipi_vector, X86CPU),
        /* MCE */
        VMSTATE_UINT64(env.mcg_cap, X86CPU),
        VMSTATE_UINT64(env.mcg_status, X86CPU),
        VMSTATE_UINT64(env.mcg_ctl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.mce_banks, X86CPU, MCE_BANKS_DEF * 4),
        /* rdtscp */
        VMSTATE_UINT64(env.tsc_aux, X86CPU),
        /* KVM pvclock msr */
        VMSTATE_UINT64(env.system_time_msr, X86CPU),
        VMSTATE_UINT64(env.wall_clock_msr, X86CPU),
        /* XSAVE related fields */
        VMSTATE_UINT64_V(env.xcr0, X86CPU, 12),
        VMSTATE_UINT64_V(env.xstate_bv, X86CPU, 12),
        VMSTATE_YMMH_REGS_VARS(env.xmm_regs, X86CPU, 0, 12),
        VMSTATE_END_OF_LIST()
        /* The above list is not sorted w.r.t. version numbers, watch out! */
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_exception_info,
        &vmstate_async_pf_msr,
        &vmstate_pv_eoi_msr,
        &vmstate_steal_time_msr,
        &vmstate_fpop_ip_dp,
        &vmstate_msr_tsc_adjust,
        &vmstate_msr_tscdeadline,
        &vmstate_msr_ia32_misc_enable,
        &vmstate_msr_ia32_feature_control,
        &vmstate_msr_architectural_pmu,
        &vmstate_mpx,
        &vmstate_msr_hypercall_hypercall,
        &vmstate_msr_hyperv_vapic,
        &vmstate_msr_hyperv_time,
        &vmstate_msr_hyperv_crash,
        &vmstate_msr_hyperv_runtime,
        &vmstate_msr_hyperv_synic,
        &vmstate_msr_hyperv_stimer,
        &vmstate_msr_hyperv_reenlightenment,
        &vmstate_avx512,
        &vmstate_xss,
        &vmstate_tsc_khz,
        &vmstate_msr_smi_count,
#ifdef TARGET_X86_64
        &vmstate_pkru,
#endif
        &vmstate_spec_ctrl,
        &vmstate_mcg_ext_ctl,
        &vmstate_msr_intel_pt,
        &vmstate_msr_virt_ssbd,
        &vmstate_svm_npt,
#ifndef TARGET_X86_64
        &vmstate_efer32,
#endif
#ifdef CONFIG_KVM
        &vmstate_nested_state,
#endif
        NULL
    }
};