qemu/target/i386/machine.c
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "hw/hw.h"
#include "hw/boards.h"
#include "hw/i386/pc.h"
#include "hw/isa/isa.h"
#include "migration/cpu.h"

#include "sysemu/kvm.h"

#include "qemu/error-report.h"

static const VMStateDescription vmstate_segment = {
    .name = "segment",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(selector, SegmentCache),
        VMSTATE_UINTTL(base, SegmentCache),
        VMSTATE_UINT32(limit, SegmentCache),
        VMSTATE_UINT32(flags, SegmentCache),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_SEGMENT(_field, _state) {                            \
    .name       = (stringify(_field)),                               \
    .size       = sizeof(SegmentCache),                              \
    .vmsd       = &vmstate_segment,                                  \
    .flags      = VMS_STRUCT,                                        \
    .offset     = offsetof(_state, _field)                           \
            + type_check(SegmentCache,typeof_field(_state, _field))  \
}

#define VMSTATE_SEGMENT_ARRAY(_field, _state, _n)                    \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_segment, SegmentCache)

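/*
 * Usage sketch (illustrative only; FooState is a hypothetical container, not
 * part of this file): VMSTATE_SEGMENT embeds the "segment" sub-description
 * above into an outer VMStateDescription via VMS_STRUCT:
 *
 *     typedef struct FooState { SegmentCache seg; } FooState;
 *
 *     static const VMStateDescription vmstate_foo = {
 *         .name = "foo",
 *         .fields = (VMStateField[]) {
 *             VMSTATE_SEGMENT(seg, FooState),
 *             VMSTATE_END_OF_LIST()
 *         }
 *     };
 *
 * The type_check() term evaluates to zero when the field really is a
 * SegmentCache (and breaks the build otherwise), so it does not change the
 * computed offset.
 */
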
static const VMStateDescription vmstate_xmm_reg = {
    .name = "xmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_XMM_REGS(_field, _state, _start)                         \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_xmm_reg, ZMMReg)

/* YMMH format is the same as XMM, but for bits 128-255 */
static const VMStateDescription vmstate_ymmh_reg = {
    .name = "ymmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_YMMH_REGS_VARS(_field, _state, _start, _v)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, _v,    \
                             vmstate_ymmh_reg, ZMMReg)

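/* ZMMH covers bits 256-511 (quadwords 4-7) of the low 16 ZMM registers */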
static const VMStateDescription vmstate_zmmh_reg = {
    .name = "zmmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_ZMMH_REGS_VARS(_field, _state, _start)                   \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_zmmh_reg, ZMMReg)

#ifdef TARGET_X86_64
static const VMStateDescription vmstate_hi16_zmm_reg = {
    .name = "hi16_zmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_Hi16_ZMM_REGS_VARS(_field, _state, _start)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_hi16_zmm_reg, ZMMReg)
#endif

static const VMStateDescription vmstate_bnd_regs = {
    .name = "bnd_regs",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(lb, BNDReg),
        VMSTATE_UINT64(ub, BNDReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_BND_REGS(_field, _state, _n)          \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_bnd_regs, BNDReg)

static const VMStateDescription vmstate_mtrr_var = {
    .name = "mtrr_var",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, MTRRVar),
        VMSTATE_UINT64(mask, MTRRVar),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_MTRR_VARS(_field, _state, _n, _v)                    \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar)

typedef struct x86_FPReg_tmp {
    FPReg *parent;
    uint64_t tmp_mant;
    uint16_t tmp_exp;
} x86_FPReg_tmp;

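/*
 * Split a floatx80 into the 64-bit mantissa and 16-bit sign/exponent words
 * that go into the migration stream, and recombine them on load.
 */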
static void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
{
    CPU_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

static floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}

static int fpreg_pre_save(void *opaque)
{
    x86_FPReg_tmp *tmp = opaque;

    /* we save the real CPU data (in case of MMX usage only 'mant'
       contains the MMX register) */
    cpu_get_fp80(&tmp->tmp_mant, &tmp->tmp_exp, tmp->parent->d);

    return 0;
}

static int fpreg_post_load(void *opaque, int version)
{
    x86_FPReg_tmp *tmp = opaque;

    tmp->parent->d = cpu_set_fp80(tmp->tmp_mant, tmp->tmp_exp);
    return 0;
}

static const VMStateDescription vmstate_fpreg_tmp = {
    .name = "fpreg_tmp",
    .post_load = fpreg_post_load,
    .pre_save  = fpreg_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(tmp_mant, x86_FPReg_tmp),
        VMSTATE_UINT16(tmp_exp, x86_FPReg_tmp),
        VMSTATE_END_OF_LIST()
    }
};

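/*
 * VMSTATE_WITH_TMP allocates a scratch x86_FPReg_tmp whose 'parent' points at
 * the FPReg being migrated: fpreg_pre_save() splits the 80-bit value into
 * tmp_mant/tmp_exp for the wire format, and fpreg_post_load() rebuilds the
 * floatx80 from them on the destination.
 */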
static const VMStateDescription vmstate_fpreg = {
    .name = "fpreg",
    .fields = (VMStateField[]) {
        VMSTATE_WITH_TMP(FPReg, x86_FPReg_tmp, vmstate_fpreg_tmp),
        VMSTATE_END_OF_LIST()
    }
};

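/*
 * Pack the x87 state into its migration form before saving: fpstt is folded
 * into bits 11-13 of fpus_vmstate, and fptag_vmstate gets one bit per
 * register with the sense inverted (bit set = register valid).  For example,
 * fptags = {0,0,1,1,1,1,1,1} (only registers 0 and 1 valid) becomes
 * fptag_vmstate = 0x03.
 */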
static int cpu_pre_save(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    /* FPU */
    env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    env->fptag_vmstate = 0;
    for (i = 0; i < 8; i++) {
        env->fptag_vmstate |= ((!env->fptags[i]) << i);
    }

    env->fpregs_format_vmstate = 0;

    /*
     * Real mode guest segment registers' DPL should be zero.
     * Older KVM versions were setting it wrongly.
     * Fixing it will allow live migration to a host with unrestricted guest
     * support (otherwise the migration will fail with an invalid guest state
     * error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

    return 0;
}

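/*
 * Undo the pre_save packing and re-derive state that is not carried in the
 * stream: reject a migrated TSC frequency that conflicts with a user-specified
 * one, refuse the legacy non-softfloat FP format, recompute CPL from SS.DPL,
 * unpack the FPU tag bitmap, re-arm breakpoints/watchpoints under TCG, and
 * flush the TLB.
 */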
static int cpu_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int i;

    if (env->tsc_khz && env->user_tsc_khz &&
        env->tsc_khz != env->user_tsc_khz) {
        error_report("Mismatch between user-specified TSC frequency and "
                     "migrated TSC frequency");
        return -EINVAL;
    }

    if (env->fpregs_format_vmstate) {
        error_report("Unsupported old non-softfloat CPU state");
        return -EINVAL;
    }
    /*
     * Real mode guest segment registers' DPL should be zero.
     * Older KVM versions were setting it wrongly.
     * Fixing it allows live migration from such hosts, which lack
     * unrestricted guest support, to a host with unrestricted guest support
     * (otherwise the migration will fail with an invalid guest state
     * error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

    /* Older versions of QEMU incorrectly used CS.DPL as the CPL when
     * running under KVM.  This is wrong for conforming code segments.
     * Luckily, in our implementation the CPL field of hflags is redundant
     * and we can get the right value from the SS descriptor privilege level.
     */
    env->hflags &= ~HF_CPL_MASK;
    env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;

    env->fpstt = (env->fpus_vmstate >> 11) & 7;
    env->fpus = env->fpus_vmstate & ~0x3800;
    env->fptag_vmstate ^= 0xff;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = (env->fptag_vmstate >> i) & 1;
    }
    if (tcg_enabled()) {
        target_ulong dr7;
        update_fp_status(env);
        update_mxcsr_status(env);

        cpu_breakpoint_remove_all(cs, BP_CPU);
        cpu_watchpoint_remove_all(cs, BP_CPU);

        /* Indicate all breakpoints disabled, as they are, then
           let the helper re-enable them.  */
        dr7 = env->dr[7];
        env->dr[7] = dr7 & ~(DR7_GLOBAL_BP_MASK | DR7_LOCAL_BP_MASK);
        cpu_x86_update_dr7(env, dr7);
    }
    tlb_flush(cs);
    return 0;
}

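/*
 * Each subsection below is guarded by a .needed callback: when it returns
 * false the subsection is simply omitted from the migration stream, so guests
 * whose optional state is still at its reset value can migrate to
 * destinations that do not know about the subsection.
 */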
static bool async_pf_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.async_pf_en_msr != 0;
}

static bool pv_eoi_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.pv_eoi_en_msr != 0;
}

static bool steal_time_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.steal_time_msr != 0;
}

static const VMStateDescription vmstate_steal_time_msr = {
    .name = "cpu/steal_time_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = steal_time_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.steal_time_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_async_pf_msr = {
    .name = "cpu/async_pf_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = async_pf_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.async_pf_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_pv_eoi_msr = {
    .name = "cpu/async_pv_eoi_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pv_eoi_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.pv_eoi_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool fpop_ip_dp_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->fpop != 0 || env->fpip != 0 || env->fpdp != 0;
}

static const VMStateDescription vmstate_fpop_ip_dp = {
    .name = "cpu/fpop_ip_dp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpop_ip_dp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(env.fpop, X86CPU),
        VMSTATE_UINT64(env.fpip, X86CPU),
        VMSTATE_UINT64(env.fpdp, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tsc_adjust_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_adjust != 0;
}

static const VMStateDescription vmstate_msr_tsc_adjust = {
    .name = "cpu/msr_tsc_adjust",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_adjust_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_adjust, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool msr_smi_count_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_smi_count != 0;
}

static const VMStateDescription vmstate_msr_smi_count = {
    .name = "cpu/msr_smi_count",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = msr_smi_count_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_smi_count, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tscdeadline_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_deadline != 0;
}

static const VMStateDescription vmstate_msr_tscdeadline = {
    .name = "cpu/msr_tscdeadline",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tscdeadline_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_deadline, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool misc_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_misc_enable != MSR_IA32_MISC_ENABLE_DEFAULT;
}

static bool feature_control_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_feature_control != 0;
}

static const VMStateDescription vmstate_msr_ia32_misc_enable = {
    .name = "cpu/msr_ia32_misc_enable",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = misc_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_misc_enable, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_msr_ia32_feature_control = {
    .name = "cpu/msr_ia32_feature_control",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = feature_control_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmu_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_fixed_ctr_ctrl || env->msr_global_ctrl ||
        env->msr_global_status || env->msr_global_ovf_ctrl) {
        return true;
    }
    for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
        if (env->msr_fixed_counters[i]) {
            return true;
        }
    }
    for (i = 0; i < MAX_GP_COUNTERS; i++) {
        if (env->msr_gp_counters[i] || env->msr_gp_evtsel[i]) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_architectural_pmu = {
    .name = "cpu/msr_architectural_pmu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmu_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_status, X86CPU),
        VMSTATE_UINT64(env.msr_global_ovf_ctrl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_fixed_counters, X86CPU, MAX_FIXED_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_counters, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_evtsel, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_END_OF_LIST()
    }
};

static bool mpx_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < 4; i++) {
        if (env->bnd_regs[i].lb || env->bnd_regs[i].ub) {
            return true;
        }
    }

    if (env->bndcs_regs.cfgu || env->bndcs_regs.sts) {
        return true;
    }

    return !!env->msr_bndcfgs;
}

static const VMStateDescription vmstate_mpx = {
    .name = "cpu/mpx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mpx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BND_REGS(env.bnd_regs, X86CPU, 4),
        VMSTATE_UINT64(env.bndcs_regs.cfgu, X86CPU),
        VMSTATE_UINT64(env.bndcs_regs.sts, X86CPU),
        VMSTATE_UINT64(env.msr_bndcfgs, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_hypercall_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_hypercall != 0 || env->msr_hv_guest_os_id != 0;
}

static const VMStateDescription vmstate_msr_hypercall_hypercall = {
    .name = "cpu/msr_hyperv_hypercall",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_hypercall_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU),
        VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_vapic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_vapic != 0;
}

static const VMStateDescription vmstate_msr_hyperv_vapic = {
    .name = "cpu/msr_hyperv_vapic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_vapic_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_vapic, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_time_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_tsc != 0;
}

static const VMStateDescription vmstate_msr_hyperv_time = {
    .name = "cpu/msr_hyperv_time",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_time_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_tsc, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_crash_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < HV_CRASH_PARAMS; i++) {
        if (env->msr_hv_crash_params[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_crash = {
    .name = "cpu/msr_hyperv_crash",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_crash_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params, X86CPU, HV_CRASH_PARAMS),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_runtime_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    if (!cpu->hyperv_runtime) {
        return false;
    }

    return env->msr_hv_runtime != 0;
}

static const VMStateDescription vmstate_msr_hyperv_runtime = {
    .name = "cpu/msr_hyperv_runtime",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_runtime_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_runtime, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_synic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_hv_synic_control != 0 ||
        env->msr_hv_synic_evt_page != 0 ||
        env->msr_hv_synic_msg_page != 0) {
        return true;
    }

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
        if (env->msr_hv_synic_sint[i] != 0) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_hyperv_synic = {
    .name = "cpu/msr_hyperv_synic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_synic_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU, HV_SINT_COUNT),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_stimer_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_stimer_config); i++) {
        if (env->msr_hv_stimer_config[i] || env->msr_hv_stimer_count[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_stimer = {
    .name = "cpu/msr_hyperv_stimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_stimer_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config, X86CPU,
                             HV_STIMER_COUNT),
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count, X86CPU, HV_STIMER_COUNT),
        VMSTATE_END_OF_LIST()
    }
};

static bool avx512_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < NB_OPMASK_REGS; i++) {
        if (env->opmask_regs[i]) {
            return true;
        }
    }

    for (i = 0; i < CPU_NB_REGS; i++) {
#define ENV_XMM(reg, field) (env->xmm_regs[reg].ZMM_Q(field))
        if (ENV_XMM(i, 4) || ENV_XMM(i, 6) ||
            ENV_XMM(i, 5) || ENV_XMM(i, 7)) {
            return true;
        }
#ifdef TARGET_X86_64
        if (ENV_XMM(i+16, 0) || ENV_XMM(i+16, 1) ||
            ENV_XMM(i+16, 2) || ENV_XMM(i+16, 3) ||
            ENV_XMM(i+16, 4) || ENV_XMM(i+16, 5) ||
            ENV_XMM(i+16, 6) || ENV_XMM(i+16, 7)) {
            return true;
        }
#endif
    }

    return false;
}

static const VMStateDescription vmstate_avx512 = {
    .name = "cpu/avx512",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = avx512_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS),
        VMSTATE_ZMMH_REGS_VARS(env.xmm_regs, X86CPU, 0),
#ifdef TARGET_X86_64
        VMSTATE_Hi16_ZMM_REGS_VARS(env.xmm_regs, X86CPU, 16),
#endif
        VMSTATE_END_OF_LIST()
    }
};

static bool xss_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->xss != 0;
}

static const VMStateDescription vmstate_xss = {
    .name = "cpu/xss",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xss_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.xss, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

#ifdef TARGET_X86_64
static bool pkru_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->pkru != 0;
}

static const VMStateDescription vmstate_pkru = {
    .name = "cpu/pkru",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pkru_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT32(env.pkru, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
#endif

static bool tsc_khz_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
    PCMachineClass *pcmc = PC_MACHINE_CLASS(mc);
    return env->tsc_khz && pcmc->save_tsc_khz;
}

static const VMStateDescription vmstate_tsc_khz = {
    .name = "cpu/tsc_khz",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_khz_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(env.tsc_khz, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool mcg_ext_ctl_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    return cpu->enable_lmce && env->mcg_ext_ctl;
}

static const VMStateDescription vmstate_mcg_ext_ctl = {
    .name = "cpu/mcg_ext_ctl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mcg_ext_ctl_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.mcg_ext_ctl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool spec_ctrl_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->spec_ctrl != 0;
}

static const VMStateDescription vmstate_spec_ctrl = {
    .name = "cpu/spec_ctrl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spec_ctrl_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT64(env.spec_ctrl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool intel_pt_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_rtit_ctrl || env->msr_rtit_status ||
        env->msr_rtit_output_base || env->msr_rtit_output_mask ||
        env->msr_rtit_cr3_match) {
        return true;
    }

    for (i = 0; i < MAX_RTIT_ADDRS; i++) {
        if (env->msr_rtit_addrs[i]) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_intel_pt = {
    .name = "cpu/intel_pt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = intel_pt_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_rtit_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_status, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_output_base, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_output_mask, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_cr3_match, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_rtit_addrs, X86CPU, MAX_RTIT_ADDRS),
        VMSTATE_END_OF_LIST()
    }
};

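/*
 * Top-level CPU state description.  Mandatory fields live in the list below;
 * the entries tagged _V(..., 12) are only sent from stream version 12 on,
 * which added the XSAVE-related state.  Everything optional goes through the
 * subsections so that older destinations can still accept the stream.
 */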
VMStateDescription vmstate_x86_cpu = {
    .name = "cpu",
    .version_id = 12,
    .minimum_version_id = 11,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.regs, X86CPU, CPU_NB_REGS),
        VMSTATE_UINTTL(env.eip, X86CPU),
        VMSTATE_UINTTL(env.eflags, X86CPU),
        VMSTATE_UINT32(env.hflags, X86CPU),
        /* FPU */
        VMSTATE_UINT16(env.fpuc, X86CPU),
        VMSTATE_UINT16(env.fpus_vmstate, X86CPU),
        VMSTATE_UINT16(env.fptag_vmstate, X86CPU),
        VMSTATE_UINT16(env.fpregs_format_vmstate, X86CPU),

        VMSTATE_STRUCT_ARRAY(env.fpregs, X86CPU, 8, 0, vmstate_fpreg, FPReg),

        VMSTATE_SEGMENT_ARRAY(env.segs, X86CPU, 6),
        VMSTATE_SEGMENT(env.ldt, X86CPU),
        VMSTATE_SEGMENT(env.tr, X86CPU),
        VMSTATE_SEGMENT(env.gdt, X86CPU),
        VMSTATE_SEGMENT(env.idt, X86CPU),

        VMSTATE_UINT32(env.sysenter_cs, X86CPU),
        VMSTATE_UINTTL(env.sysenter_esp, X86CPU),
        VMSTATE_UINTTL(env.sysenter_eip, X86CPU),

        VMSTATE_UINTTL(env.cr[0], X86CPU),
        VMSTATE_UINTTL(env.cr[2], X86CPU),
        VMSTATE_UINTTL(env.cr[3], X86CPU),
        VMSTATE_UINTTL(env.cr[4], X86CPU),
        VMSTATE_UINTTL_ARRAY(env.dr, X86CPU, 8),
        /* MMU */
        VMSTATE_INT32(env.a20_mask, X86CPU),
        /* XMM */
        VMSTATE_UINT32(env.mxcsr, X86CPU),
        VMSTATE_XMM_REGS(env.xmm_regs, X86CPU, 0),

#ifdef TARGET_X86_64
        VMSTATE_UINT64(env.efer, X86CPU),
        VMSTATE_UINT64(env.star, X86CPU),
        VMSTATE_UINT64(env.lstar, X86CPU),
        VMSTATE_UINT64(env.cstar, X86CPU),
        VMSTATE_UINT64(env.fmask, X86CPU),
        VMSTATE_UINT64(env.kernelgsbase, X86CPU),
#endif
        VMSTATE_UINT32(env.smbase, X86CPU),

        VMSTATE_UINT64(env.pat, X86CPU),
        VMSTATE_UINT32(env.hflags2, X86CPU),

        VMSTATE_UINT64(env.vm_hsave, X86CPU),
        VMSTATE_UINT64(env.vm_vmcb, X86CPU),
        VMSTATE_UINT64(env.tsc_offset, X86CPU),
        VMSTATE_UINT64(env.intercept, X86CPU),
        VMSTATE_UINT16(env.intercept_cr_read, X86CPU),
        VMSTATE_UINT16(env.intercept_cr_write, X86CPU),
        VMSTATE_UINT16(env.intercept_dr_read, X86CPU),
        VMSTATE_UINT16(env.intercept_dr_write, X86CPU),
        VMSTATE_UINT32(env.intercept_exceptions, X86CPU),
        VMSTATE_UINT8(env.v_tpr, X86CPU),
        /* MTRRs */
        VMSTATE_UINT64_ARRAY(env.mtrr_fixed, X86CPU, 11),
        VMSTATE_UINT64(env.mtrr_deftype, X86CPU),
        VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8),
        /* KVM-related states */
        VMSTATE_INT32(env.interrupt_injected, X86CPU),
        VMSTATE_UINT32(env.mp_state, X86CPU),
        VMSTATE_UINT64(env.tsc, X86CPU),
        VMSTATE_INT32(env.exception_injected, X86CPU),
        VMSTATE_UINT8(env.soft_interrupt, X86CPU),
        VMSTATE_UINT8(env.nmi_injected, X86CPU),
        VMSTATE_UINT8(env.nmi_pending, X86CPU),
        VMSTATE_UINT8(env.has_error_code, X86CPU),
        VMSTATE_UINT32(env.sipi_vector, X86CPU),
        /* MCE */
        VMSTATE_UINT64(env.mcg_cap, X86CPU),
        VMSTATE_UINT64(env.mcg_status, X86CPU),
        VMSTATE_UINT64(env.mcg_ctl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.mce_banks, X86CPU, MCE_BANKS_DEF * 4),
        /* rdtscp */
        VMSTATE_UINT64(env.tsc_aux, X86CPU),
        /* KVM pvclock msr */
        VMSTATE_UINT64(env.system_time_msr, X86CPU),
        VMSTATE_UINT64(env.wall_clock_msr, X86CPU),
        /* XSAVE related fields */
        VMSTATE_UINT64_V(env.xcr0, X86CPU, 12),
        VMSTATE_UINT64_V(env.xstate_bv, X86CPU, 12),
        VMSTATE_YMMH_REGS_VARS(env.xmm_regs, X86CPU, 0, 12),
        VMSTATE_END_OF_LIST()
        /* The above list is not sorted w.r.t. version numbers, watch out! */
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_async_pf_msr,
        &vmstate_pv_eoi_msr,
        &vmstate_steal_time_msr,
        &vmstate_fpop_ip_dp,
        &vmstate_msr_tsc_adjust,
        &vmstate_msr_tscdeadline,
        &vmstate_msr_ia32_misc_enable,
        &vmstate_msr_ia32_feature_control,
        &vmstate_msr_architectural_pmu,
        &vmstate_mpx,
        &vmstate_msr_hypercall_hypercall,
        &vmstate_msr_hyperv_vapic,
        &vmstate_msr_hyperv_time,
        &vmstate_msr_hyperv_crash,
        &vmstate_msr_hyperv_runtime,
        &vmstate_msr_hyperv_synic,
        &vmstate_msr_hyperv_stimer,
        &vmstate_avx512,
        &vmstate_xss,
        &vmstate_tsc_khz,
        &vmstate_msr_smi_count,
#ifdef TARGET_X86_64
        &vmstate_pkru,
#endif
        &vmstate_spec_ctrl,
        &vmstate_mcg_ext_ctl,
        &vmstate_msr_intel_pt,
        NULL
    }
};