qemu/target/i386/machine.c
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "hw/hw.h"
#include "hw/boards.h"
#include "hw/i386/pc.h"
#include "hw/isa/isa.h"
#include "migration/cpu.h"

#include "sysemu/kvm.h"

#include "qemu/error-report.h"

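/*
 * The VMState descriptions in this file define how x86 CPU state is
 * serialized for live migration and snapshots (savevm/loadvm).
 */
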
static const VMStateDescription vmstate_segment = {
    .name = "segment",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(selector, SegmentCache),
        VMSTATE_UINTTL(base, SegmentCache),
        VMSTATE_UINT32(limit, SegmentCache),
        VMSTATE_UINT32(flags, SegmentCache),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_SEGMENT(_field, _state) {                            \
    .name       = (stringify(_field)),                               \
    .size       = sizeof(SegmentCache),                              \
    .vmsd       = &vmstate_segment,                                  \
    .flags      = VMS_STRUCT,                                        \
    .offset     = offsetof(_state, _field)                           \
            + type_check(SegmentCache,typeof_field(_state, _field))  \
}

#define VMSTATE_SEGMENT_ARRAY(_field, _state, _n)                    \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_segment, SegmentCache)

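/*
 * The 512-bit ZMM registers are migrated in slices: bits 0-127 as "xmm_reg",
 * bits 128-255 as "ymmh_reg" and bits 256-511 as "zmmh_reg" (see below),
 * matching how the XMM and YMM parts were already laid out in the stream.
 */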
static const VMStateDescription vmstate_xmm_reg = {
    .name = "xmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_XMM_REGS(_field, _state, _start)                         \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_xmm_reg, ZMMReg)

/* YMMH format is the same as XMM, but for bits 128-255 */
static const VMStateDescription vmstate_ymmh_reg = {
    .name = "ymmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_YMMH_REGS_VARS(_field, _state, _start, _v)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, _v,    \
                             vmstate_ymmh_reg, ZMMReg)

static const VMStateDescription vmstate_zmmh_reg = {
    .name = "zmmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_ZMMH_REGS_VARS(_field, _state, _start)                   \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_zmmh_reg, ZMMReg)

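/* ZMM16-ZMM31 are only accessible in 64-bit mode, hence the guard below. */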
#ifdef TARGET_X86_64
static const VMStateDescription vmstate_hi16_zmm_reg = {
    .name = "hi16_zmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_Hi16_ZMM_REGS_VARS(_field, _state, _start)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_hi16_zmm_reg, ZMMReg)
#endif

static const VMStateDescription vmstate_bnd_regs = {
    .name = "bnd_regs",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(lb, BNDReg),
        VMSTATE_UINT64(ub, BNDReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_BND_REGS(_field, _state, _n)          \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_bnd_regs, BNDReg)

static const VMStateDescription vmstate_mtrr_var = {
    .name = "mtrr_var",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, MTRRVar),
        VMSTATE_UINT64(mask, MTRRVar),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_MTRR_VARS(_field, _state, _n, _v)                    \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar)

typedef struct x86_FPReg_tmp {
    FPReg *parent;
    uint64_t tmp_mant;
    uint16_t tmp_exp;
} x86_FPReg_tmp;

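/*
 * Split a floatx80 into a 64-bit mantissa and a 16-bit sign/exponent word
 * (and recombine them on load), so the 80-bit FP registers can be stored
 * as fixed-width integers in the migration stream.
 */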
static void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
{
    CPU_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

static floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}

static int fpreg_pre_save(void *opaque)
{
    x86_FPReg_tmp *tmp = opaque;

    /* we save the real CPU data (in case of MMX usage, only 'mant'
       contains the MMX register) */
    cpu_get_fp80(&tmp->tmp_mant, &tmp->tmp_exp, tmp->parent->d);

    return 0;
}

static int fpreg_post_load(void *opaque, int version)
{
    x86_FPReg_tmp *tmp = opaque;

    tmp->parent->d = cpu_set_fp80(tmp->tmp_mant, tmp->tmp_exp);
    return 0;
}

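/*
 * The descriptions below migrate each FP register through the temporary
 * mant/exp representation: VMSTATE_WITH_TMP wraps the FPReg in an
 * x86_FPReg_tmp for the duration of save/load, and the hooks above do the
 * floatx80 conversion.
 */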
static const VMStateDescription vmstate_fpreg_tmp = {
    .name = "fpreg_tmp",
    .post_load = fpreg_post_load,
    .pre_save  = fpreg_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(tmp_mant, x86_FPReg_tmp),
        VMSTATE_UINT16(tmp_exp, x86_FPReg_tmp),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_fpreg = {
    .name = "fpreg",
    .fields = (VMStateField[]) {
        VMSTATE_WITH_TMP(FPReg, x86_FPReg_tmp, vmstate_fpreg_tmp),
        VMSTATE_END_OF_LIST()
    }
};

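/*
 * cpu_pre_save packs the FPU top-of-stack pointer into bits 11-13 of the
 * saved status word and converts the per-register tag array into the
 * one-bit-per-register "valid" mask used by the stream format.
 */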
static int cpu_pre_save(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    /* FPU */
    env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    env->fptag_vmstate = 0;
    for (i = 0; i < 8; i++) {
        env->fptag_vmstate |= ((!env->fptags[i]) << i);
    }

    env->fpregs_format_vmstate = 0;

    /*
     * Real mode guest segment registers' DPL should be zero.
     * Older KVM versions were setting it wrongly.
     * Fixing it will allow live migration to a host with unrestricted
     * guest support (otherwise the migration will fail with an invalid
     * guest state error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

    return 0;
}

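/*
 * cpu_post_load rebuilds state that is derived rather than migrated: the CPL
 * in hflags from SS.DPL, the FP tag array from the packed tag word and, for
 * TCG, the debug register and FP/MXCSR status; it finishes with a TLB flush
 * because cached translations may no longer match the incoming CPU state.
 */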
static int cpu_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int i;

    if (env->tsc_khz && env->user_tsc_khz &&
        env->tsc_khz != env->user_tsc_khz) {
        error_report("Mismatch between user-specified TSC frequency and "
                     "migrated TSC frequency");
        return -EINVAL;
    }

    if (env->fpregs_format_vmstate) {
        error_report("Unsupported old non-softfloat CPU state");
        return -EINVAL;
    }
    /*
     * Real mode guest segment registers' DPL should be zero.
     * Older KVM versions were setting it wrongly.
     * Fixing it will allow live migration from hosts without unrestricted
     * guest support to a host with unrestricted guest support
     * (otherwise the migration will fail with an invalid guest state
     * error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

    /* Older versions of QEMU incorrectly used CS.DPL as the CPL when
     * running under KVM.  This is wrong for conforming code segments.
     * Luckily, in our implementation the CPL field of hflags is redundant
     * and we can get the right value from the SS descriptor privilege level.
     */
    env->hflags &= ~HF_CPL_MASK;
    env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;

    env->fpstt = (env->fpus_vmstate >> 11) & 7;
    env->fpus = env->fpus_vmstate & ~0x3800;
    env->fptag_vmstate ^= 0xff;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = (env->fptag_vmstate >> i) & 1;
    }
    if (tcg_enabled()) {
        target_ulong dr7;
        update_fp_status(env);
        update_mxcsr_status(env);

        cpu_breakpoint_remove_all(cs, BP_CPU);
        cpu_watchpoint_remove_all(cs, BP_CPU);

        /* Indicate all breakpoints disabled, as they are, then
           let the helper re-enable them.  */
        dr7 = env->dr[7];
        env->dr[7] = dr7 & ~(DR7_GLOBAL_BP_MASK | DR7_LOCAL_BP_MASK);
        cpu_x86_update_dr7(env, dr7);
    }
    tlb_flush(cs);
    return 0;
}

static bool async_pf_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.async_pf_en_msr != 0;
}

static bool pv_eoi_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.pv_eoi_en_msr != 0;
}

static bool steal_time_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.steal_time_msr != 0;
}

static const VMStateDescription vmstate_steal_time_msr = {
    .name = "cpu/steal_time_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = steal_time_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.steal_time_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_async_pf_msr = {
    .name = "cpu/async_pf_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = async_pf_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.async_pf_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_pv_eoi_msr = {
    .name = "cpu/async_pv_eoi_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pv_eoi_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.pv_eoi_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool fpop_ip_dp_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->fpop != 0 || env->fpip != 0 || env->fpdp != 0;
}

static const VMStateDescription vmstate_fpop_ip_dp = {
    .name = "cpu/fpop_ip_dp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpop_ip_dp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(env.fpop, X86CPU),
        VMSTATE_UINT64(env.fpip, X86CPU),
        VMSTATE_UINT64(env.fpdp, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tsc_adjust_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_adjust != 0;
}

static const VMStateDescription vmstate_msr_tsc_adjust = {
    .name = "cpu/msr_tsc_adjust",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_adjust_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_adjust, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tscdeadline_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_deadline != 0;
}

static const VMStateDescription vmstate_msr_tscdeadline = {
    .name = "cpu/msr_tscdeadline",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tscdeadline_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_deadline, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool misc_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_misc_enable != MSR_IA32_MISC_ENABLE_DEFAULT;
}

static bool feature_control_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_feature_control != 0;
}

static const VMStateDescription vmstate_msr_ia32_misc_enable = {
    .name = "cpu/msr_ia32_misc_enable",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = misc_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_misc_enable, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_msr_ia32_feature_control = {
    .name = "cpu/msr_ia32_feature_control",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = feature_control_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmu_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_fixed_ctr_ctrl || env->msr_global_ctrl ||
        env->msr_global_status || env->msr_global_ovf_ctrl) {
        return true;
    }
    for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
        if (env->msr_fixed_counters[i]) {
            return true;
        }
    }
    for (i = 0; i < MAX_GP_COUNTERS; i++) {
        if (env->msr_gp_counters[i] || env->msr_gp_evtsel[i]) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_architectural_pmu = {
    .name = "cpu/msr_architectural_pmu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmu_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_status, X86CPU),
        VMSTATE_UINT64(env.msr_global_ovf_ctrl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_fixed_counters, X86CPU, MAX_FIXED_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_counters, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_evtsel, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_END_OF_LIST()
    }
};

static bool mpx_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < 4; i++) {
        if (env->bnd_regs[i].lb || env->bnd_regs[i].ub) {
            return true;
        }
    }

    if (env->bndcs_regs.cfgu || env->bndcs_regs.sts) {
        return true;
    }

    return !!env->msr_bndcfgs;
}

static const VMStateDescription vmstate_mpx = {
    .name = "cpu/mpx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mpx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BND_REGS(env.bnd_regs, X86CPU, 4),
        VMSTATE_UINT64(env.bndcs_regs.cfgu, X86CPU),
        VMSTATE_UINT64(env.bndcs_regs.sts, X86CPU),
        VMSTATE_UINT64(env.msr_bndcfgs, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_hypercall_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_hypercall != 0 || env->msr_hv_guest_os_id != 0;
}

static const VMStateDescription vmstate_msr_hypercall_hypercall = {
    .name = "cpu/msr_hyperv_hypercall",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_hypercall_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU),
        VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_vapic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_vapic != 0;
}

static const VMStateDescription vmstate_msr_hyperv_vapic = {
    .name = "cpu/msr_hyperv_vapic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_vapic_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_vapic, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_time_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_tsc != 0;
}

static const VMStateDescription vmstate_msr_hyperv_time = {
    .name = "cpu/msr_hyperv_time",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_time_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_tsc, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_crash_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < HV_CRASH_PARAMS; i++) {
        if (env->msr_hv_crash_params[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_crash = {
    .name = "cpu/msr_hyperv_crash",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_crash_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params, X86CPU, HV_CRASH_PARAMS),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_runtime_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    if (!cpu->hyperv_runtime) {
        return false;
    }

    return env->msr_hv_runtime != 0;
}

static const VMStateDescription vmstate_msr_hyperv_runtime = {
    .name = "cpu/msr_hyperv_runtime",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_runtime_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_runtime, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_synic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_hv_synic_control != 0 ||
        env->msr_hv_synic_evt_page != 0 ||
        env->msr_hv_synic_msg_page != 0) {
        return true;
    }

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
        if (env->msr_hv_synic_sint[i] != 0) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_hyperv_synic = {
    .name = "cpu/msr_hyperv_synic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_synic_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU, HV_SINT_COUNT),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_stimer_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_stimer_config); i++) {
        if (env->msr_hv_stimer_config[i] || env->msr_hv_stimer_count[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_stimer = {
    .name = "cpu/msr_hyperv_stimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_stimer_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config, X86CPU,
                             HV_STIMER_COUNT),
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count, X86CPU, HV_STIMER_COUNT),
        VMSTATE_END_OF_LIST()
    }
};

static bool avx512_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < NB_OPMASK_REGS; i++) {
        if (env->opmask_regs[i]) {
            return true;
        }
    }

    for (i = 0; i < CPU_NB_REGS; i++) {
#define ENV_XMM(reg, field) (env->xmm_regs[reg].ZMM_Q(field))
        if (ENV_XMM(i, 4) || ENV_XMM(i, 6) ||
            ENV_XMM(i, 5) || ENV_XMM(i, 7)) {
            return true;
        }
#ifdef TARGET_X86_64
        if (ENV_XMM(i+16, 0) || ENV_XMM(i+16, 1) ||
            ENV_XMM(i+16, 2) || ENV_XMM(i+16, 3) ||
            ENV_XMM(i+16, 4) || ENV_XMM(i+16, 5) ||
            ENV_XMM(i+16, 6) || ENV_XMM(i+16, 7)) {
            return true;
        }
#endif
    }

    return false;
}

static const VMStateDescription vmstate_avx512 = {
    .name = "cpu/avx512",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = avx512_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS),
        VMSTATE_ZMMH_REGS_VARS(env.xmm_regs, X86CPU, 0),
#ifdef TARGET_X86_64
        VMSTATE_Hi16_ZMM_REGS_VARS(env.xmm_regs, X86CPU, 16),
#endif
        VMSTATE_END_OF_LIST()
    }
};

static bool xss_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->xss != 0;
}

static const VMStateDescription vmstate_xss = {
    .name = "cpu/xss",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xss_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.xss, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

#ifdef TARGET_X86_64
static bool pkru_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->pkru != 0;
}

static const VMStateDescription vmstate_pkru = {
    .name = "cpu/pkru",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pkru_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT32(env.pkru, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
#endif

static bool tsc_khz_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
    PCMachineClass *pcmc = PC_MACHINE_CLASS(mc);
    return env->tsc_khz && pcmc->save_tsc_khz;
}

static const VMStateDescription vmstate_tsc_khz = {
    .name = "cpu/tsc_khz",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_khz_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(env.tsc_khz, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool mcg_ext_ctl_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    return cpu->enable_lmce && env->mcg_ext_ctl;
}

static const VMStateDescription vmstate_mcg_ext_ctl = {
    .name = "cpu/mcg_ext_ctl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mcg_ext_ctl_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.mcg_ext_ctl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool spec_ctrl_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->spec_ctrl != 0;
}

static const VMStateDescription vmstate_spec_ctrl = {
    .name = "cpu/spec_ctrl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spec_ctrl_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT64(env.spec_ctrl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

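/*
 * Each subsection above has a .needed callback and is only put on the wire
 * when that callback returns true, so optional state does not break
 * migration to destinations that do not know about it as long as it is
 * still in its default (unused) state.
 */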
VMStateDescription vmstate_x86_cpu = {
    .name = "cpu",
    .version_id = 12,
    .minimum_version_id = 11,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.regs, X86CPU, CPU_NB_REGS),
        VMSTATE_UINTTL(env.eip, X86CPU),
        VMSTATE_UINTTL(env.eflags, X86CPU),
        VMSTATE_UINT32(env.hflags, X86CPU),
        /* FPU */
        VMSTATE_UINT16(env.fpuc, X86CPU),
        VMSTATE_UINT16(env.fpus_vmstate, X86CPU),
        VMSTATE_UINT16(env.fptag_vmstate, X86CPU),
        VMSTATE_UINT16(env.fpregs_format_vmstate, X86CPU),

        VMSTATE_STRUCT_ARRAY(env.fpregs, X86CPU, 8, 0, vmstate_fpreg, FPReg),

        VMSTATE_SEGMENT_ARRAY(env.segs, X86CPU, 6),
        VMSTATE_SEGMENT(env.ldt, X86CPU),
        VMSTATE_SEGMENT(env.tr, X86CPU),
        VMSTATE_SEGMENT(env.gdt, X86CPU),
        VMSTATE_SEGMENT(env.idt, X86CPU),

        VMSTATE_UINT32(env.sysenter_cs, X86CPU),
        VMSTATE_UINTTL(env.sysenter_esp, X86CPU),
        VMSTATE_UINTTL(env.sysenter_eip, X86CPU),

        VMSTATE_UINTTL(env.cr[0], X86CPU),
        VMSTATE_UINTTL(env.cr[2], X86CPU),
        VMSTATE_UINTTL(env.cr[3], X86CPU),
        VMSTATE_UINTTL(env.cr[4], X86CPU),
        VMSTATE_UINTTL_ARRAY(env.dr, X86CPU, 8),
        /* MMU */
        VMSTATE_INT32(env.a20_mask, X86CPU),
        /* XMM */
        VMSTATE_UINT32(env.mxcsr, X86CPU),
        VMSTATE_XMM_REGS(env.xmm_regs, X86CPU, 0),

#ifdef TARGET_X86_64
        VMSTATE_UINT64(env.efer, X86CPU),
        VMSTATE_UINT64(env.star, X86CPU),
        VMSTATE_UINT64(env.lstar, X86CPU),
        VMSTATE_UINT64(env.cstar, X86CPU),
        VMSTATE_UINT64(env.fmask, X86CPU),
        VMSTATE_UINT64(env.kernelgsbase, X86CPU),
#endif
        VMSTATE_UINT32(env.smbase, X86CPU),

        VMSTATE_UINT64(env.pat, X86CPU),
        VMSTATE_UINT32(env.hflags2, X86CPU),

        VMSTATE_UINT64(env.vm_hsave, X86CPU),
        VMSTATE_UINT64(env.vm_vmcb, X86CPU),
        VMSTATE_UINT64(env.tsc_offset, X86CPU),
        VMSTATE_UINT64(env.intercept, X86CPU),
        VMSTATE_UINT16(env.intercept_cr_read, X86CPU),
        VMSTATE_UINT16(env.intercept_cr_write, X86CPU),
        VMSTATE_UINT16(env.intercept_dr_read, X86CPU),
        VMSTATE_UINT16(env.intercept_dr_write, X86CPU),
        VMSTATE_UINT32(env.intercept_exceptions, X86CPU),
        VMSTATE_UINT8(env.v_tpr, X86CPU),
        /* MTRRs */
        VMSTATE_UINT64_ARRAY(env.mtrr_fixed, X86CPU, 11),
        VMSTATE_UINT64(env.mtrr_deftype, X86CPU),
        VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8),
        /* KVM-related states */
        VMSTATE_INT32(env.interrupt_injected, X86CPU),
        VMSTATE_UINT32(env.mp_state, X86CPU),
        VMSTATE_UINT64(env.tsc, X86CPU),
        VMSTATE_INT32(env.exception_injected, X86CPU),
        VMSTATE_UINT8(env.soft_interrupt, X86CPU),
        VMSTATE_UINT8(env.nmi_injected, X86CPU),
        VMSTATE_UINT8(env.nmi_pending, X86CPU),
        VMSTATE_UINT8(env.has_error_code, X86CPU),
        VMSTATE_UINT32(env.sipi_vector, X86CPU),
        /* MCE */
        VMSTATE_UINT64(env.mcg_cap, X86CPU),
        VMSTATE_UINT64(env.mcg_status, X86CPU),
        VMSTATE_UINT64(env.mcg_ctl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.mce_banks, X86CPU, MCE_BANKS_DEF * 4),
        /* rdtscp */
        VMSTATE_UINT64(env.tsc_aux, X86CPU),
        /* KVM pvclock msr */
        VMSTATE_UINT64(env.system_time_msr, X86CPU),
        VMSTATE_UINT64(env.wall_clock_msr, X86CPU),
        /* XSAVE related fields */
        VMSTATE_UINT64_V(env.xcr0, X86CPU, 12),
        VMSTATE_UINT64_V(env.xstate_bv, X86CPU, 12),
        VMSTATE_YMMH_REGS_VARS(env.xmm_regs, X86CPU, 0, 12),
        VMSTATE_END_OF_LIST()
        /* The above list is not sorted wrt version numbers, watch out! */
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_async_pf_msr,
        &vmstate_pv_eoi_msr,
        &vmstate_steal_time_msr,
        &vmstate_fpop_ip_dp,
        &vmstate_msr_tsc_adjust,
        &vmstate_msr_tscdeadline,
        &vmstate_msr_ia32_misc_enable,
        &vmstate_msr_ia32_feature_control,
        &vmstate_msr_architectural_pmu,
        &vmstate_mpx,
        &vmstate_msr_hypercall_hypercall,
        &vmstate_msr_hyperv_vapic,
        &vmstate_msr_hyperv_time,
        &vmstate_msr_hyperv_crash,
        &vmstate_msr_hyperv_runtime,
        &vmstate_msr_hyperv_synic,
        &vmstate_msr_hyperv_stimer,
        &vmstate_avx512,
        &vmstate_xss,
        &vmstate_tsc_khz,
#ifdef TARGET_X86_64
        &vmstate_pkru,
#endif
        &vmstate_spec_ctrl,
        &vmstate_mcg_ext_ctl,
        NULL
    }
};