qemu/target/i386/machine.c
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "hw/hw.h"
#include "hw/boards.h"
#include "hw/i386/pc.h"
#include "hw/isa/isa.h"
#include "migration/cpu.h"
#include "hyperv.h"

#include "sysemu/kvm.h"

#include "qemu/error-report.h"

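/*
 * VMState descriptions for the x86 CPU: they define how CPUX86State is
 * serialized for live migration and savevm/loadvm.
 */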
static const VMStateDescription vmstate_segment = {
    .name = "segment",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(selector, SegmentCache),
        VMSTATE_UINTTL(base, SegmentCache),
        VMSTATE_UINT32(limit, SegmentCache),
        VMSTATE_UINT32(flags, SegmentCache),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_SEGMENT(_field, _state) {                            \
    .name       = (stringify(_field)),                               \
    .size       = sizeof(SegmentCache),                              \
    .vmsd       = &vmstate_segment,                                  \
    .flags      = VMS_STRUCT,                                        \
    .offset     = offsetof(_state, _field)                           \
            + type_check(SegmentCache,typeof_field(_state, _field))  \
}

#define VMSTATE_SEGMENT_ARRAY(_field, _state, _n)                    \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_segment, SegmentCache)

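/*
 * Vector state is migrated in 128-bit slices of each ZMM register:
 * ZMM_Q(0)/ZMM_Q(1) below are bits 0-127 (the XMM part), while the ymmh
 * and zmmh descriptions further down carry bits 128-255 and 256-511.
 */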
static const VMStateDescription vmstate_xmm_reg = {
    .name = "xmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_XMM_REGS(_field, _state, _start)                         \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_xmm_reg, ZMMReg)

/* YMMH format is the same as XMM, but for bits 128-255 */
static const VMStateDescription vmstate_ymmh_reg = {
    .name = "ymmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_YMMH_REGS_VARS(_field, _state, _start, _v)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, _v,    \
                             vmstate_ymmh_reg, ZMMReg)

static const VMStateDescription vmstate_zmmh_reg = {
    .name = "zmmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_ZMMH_REGS_VARS(_field, _state, _start)                   \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_zmmh_reg, ZMMReg)

#ifdef TARGET_X86_64
static const VMStateDescription vmstate_hi16_zmm_reg = {
    .name = "hi16_zmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_Hi16_ZMM_REGS_VARS(_field, _state, _start)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_hi16_zmm_reg, ZMMReg)
#endif

static const VMStateDescription vmstate_bnd_regs = {
    .name = "bnd_regs",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(lb, BNDReg),
        VMSTATE_UINT64(ub, BNDReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_BND_REGS(_field, _state, _n)          \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_bnd_regs, BNDReg)

static const VMStateDescription vmstate_mtrr_var = {
    .name = "mtrr_var",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, MTRRVar),
        VMSTATE_UINT64(mask, MTRRVar),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_MTRR_VARS(_field, _state, _n, _v)                    \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar)

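/*
 * x87 registers hold an 80-bit extended precision value (floatx80).
 * For migration each FPReg is marshalled through the temporary
 * structure below as a 64-bit mantissa plus a 16-bit sign/exponent word.
 */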
typedef struct x86_FPReg_tmp {
    FPReg *parent;
    uint64_t tmp_mant;
    uint16_t tmp_exp;
} x86_FPReg_tmp;

static void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
{
    CPU_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

static floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}

static int fpreg_pre_save(void *opaque)
{
    x86_FPReg_tmp *tmp = opaque;

    /* we save the real CPU data (in case of MMX usage only 'mant'
       contains the MMX register) */
    cpu_get_fp80(&tmp->tmp_mant, &tmp->tmp_exp, tmp->parent->d);

    return 0;
}

static int fpreg_post_load(void *opaque, int version)
{
    x86_FPReg_tmp *tmp = opaque;

    tmp->parent->d = cpu_set_fp80(tmp->tmp_mant, tmp->tmp_exp);
    return 0;
}

static const VMStateDescription vmstate_fpreg_tmp = {
    .name = "fpreg_tmp",
    .post_load = fpreg_post_load,
    .pre_save  = fpreg_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(tmp_mant, x86_FPReg_tmp),
        VMSTATE_UINT16(tmp_exp, x86_FPReg_tmp),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_fpreg = {
    .name = "fpreg",
    .fields = (VMStateField[]) {
        VMSTATE_WITH_TMP(FPReg, x86_FPReg_tmp, vmstate_fpreg_tmp),
        VMSTATE_END_OF_LIST()
    }
};

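/*
 * cpu_pre_save() repacks a few fields into the wire format before
 * migration: FPU TOP is folded into bits 11-13 of fpus_vmstate, and
 * fptag_vmstate gets one bit per x87 register (the inverse of the
 * corresponding fptags[] entry).
 */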
static int cpu_pre_save(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    /* FPU */
    env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    env->fptag_vmstate = 0;
    for (i = 0; i < 8; i++) {
        env->fptag_vmstate |= ((!env->fptags[i]) << i);
    }

    env->fpregs_format_vmstate = 0;

    /*
     * Real mode guest segment registers' DPL should be zero.
     * Older KVM versions were setting it wrongly.
     * Fixing it will allow live migration to a host with unrestricted
     * guest support (otherwise the migration will fail with an invalid
     * guest state error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

    return 0;
}

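/*
 * cpu_post_load() is the mirror of cpu_pre_save(): it validates the
 * incoming state and rebuilds everything that is derived rather than
 * migrated (CPL in hflags, the x87 status and tag words, breakpoint
 * state from DR7 under TCG), then flushes the TLB.
 */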
static int cpu_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int i;

    if (env->tsc_khz && env->user_tsc_khz &&
        env->tsc_khz != env->user_tsc_khz) {
        error_report("Mismatch between user-specified TSC frequency and "
                     "migrated TSC frequency");
        return -EINVAL;
    }

    if (env->fpregs_format_vmstate) {
        error_report("Unsupported old non-softfloat CPU state");
        return -EINVAL;
    }
    /*
     * Real mode guest segment registers' DPL should be zero.
     * Older KVM versions were setting it wrongly.
     * Fixing it allows live migration from a host without unrestricted
     * guest support to a host with unrestricted guest support
     * (otherwise the migration will fail with an invalid guest state
     * error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

    /* Older versions of QEMU incorrectly used CS.DPL as the CPL when
     * running under KVM.  This is wrong for conforming code segments.
     * Luckily, in our implementation the CPL field of hflags is redundant
     * and we can get the right value from the SS descriptor privilege level.
     */
    env->hflags &= ~HF_CPL_MASK;
    env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;

    env->fpstt = (env->fpus_vmstate >> 11) & 7;
    env->fpus = env->fpus_vmstate & ~0x3800;
    env->fptag_vmstate ^= 0xff;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = (env->fptag_vmstate >> i) & 1;
    }
    if (tcg_enabled()) {
        target_ulong dr7;
        update_fp_status(env);
        update_mxcsr_status(env);

        cpu_breakpoint_remove_all(cs, BP_CPU);
        cpu_watchpoint_remove_all(cs, BP_CPU);

        /* Indicate all breakpoints disabled, as they are, then
           let the helper re-enable them.  */
        dr7 = env->dr[7];
        env->dr[7] = dr7 & ~(DR7_GLOBAL_BP_MASK | DR7_LOCAL_BP_MASK);
        cpu_x86_update_dr7(env, dr7);
    }
    tlb_flush(cs);
    return 0;
}

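/*
 * The subsections below are attached to vmstate_x86_cpu at the bottom of
 * this file.  Each one is only put on the wire when its .needed callback
 * returns true, so the stream stays compatible with older QEMU versions
 * as long as the corresponding feature is not in use.
 */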
static bool async_pf_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.async_pf_en_msr != 0;
}

static bool pv_eoi_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.pv_eoi_en_msr != 0;
}

static bool steal_time_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.steal_time_msr != 0;
}

static const VMStateDescription vmstate_steal_time_msr = {
    .name = "cpu/steal_time_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = steal_time_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.steal_time_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_async_pf_msr = {
    .name = "cpu/async_pf_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = async_pf_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.async_pf_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_pv_eoi_msr = {
    .name = "cpu/async_pv_eoi_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pv_eoi_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.pv_eoi_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool fpop_ip_dp_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->fpop != 0 || env->fpip != 0 || env->fpdp != 0;
}

static const VMStateDescription vmstate_fpop_ip_dp = {
    .name = "cpu/fpop_ip_dp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpop_ip_dp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(env.fpop, X86CPU),
        VMSTATE_UINT64(env.fpip, X86CPU),
        VMSTATE_UINT64(env.fpdp, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tsc_adjust_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_adjust != 0;
}

static const VMStateDescription vmstate_msr_tsc_adjust = {
    .name = "cpu/msr_tsc_adjust",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_adjust_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_adjust, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool msr_smi_count_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return cpu->migrate_smi_count && env->msr_smi_count != 0;
}

static const VMStateDescription vmstate_msr_smi_count = {
    .name = "cpu/msr_smi_count",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = msr_smi_count_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_smi_count, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tscdeadline_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_deadline != 0;
}

static const VMStateDescription vmstate_msr_tscdeadline = {
    .name = "cpu/msr_tscdeadline",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tscdeadline_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_deadline, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool misc_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_misc_enable != MSR_IA32_MISC_ENABLE_DEFAULT;
}

static bool feature_control_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_feature_control != 0;
}

static const VMStateDescription vmstate_msr_ia32_misc_enable = {
    .name = "cpu/msr_ia32_misc_enable",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = misc_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_misc_enable, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_msr_ia32_feature_control = {
    .name = "cpu/msr_ia32_feature_control",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = feature_control_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmu_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_fixed_ctr_ctrl || env->msr_global_ctrl ||
        env->msr_global_status || env->msr_global_ovf_ctrl) {
        return true;
    }
    for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
        if (env->msr_fixed_counters[i]) {
            return true;
        }
    }
    for (i = 0; i < MAX_GP_COUNTERS; i++) {
        if (env->msr_gp_counters[i] || env->msr_gp_evtsel[i]) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_architectural_pmu = {
    .name = "cpu/msr_architectural_pmu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmu_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_status, X86CPU),
        VMSTATE_UINT64(env.msr_global_ovf_ctrl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_fixed_counters, X86CPU, MAX_FIXED_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_counters, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_evtsel, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_END_OF_LIST()
    }
};

static bool mpx_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < 4; i++) {
        if (env->bnd_regs[i].lb || env->bnd_regs[i].ub) {
            return true;
        }
    }

    if (env->bndcs_regs.cfgu || env->bndcs_regs.sts) {
        return true;
    }

    return !!env->msr_bndcfgs;
}

static const VMStateDescription vmstate_mpx = {
    .name = "cpu/mpx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mpx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BND_REGS(env.bnd_regs, X86CPU, 4),
        VMSTATE_UINT64(env.bndcs_regs.cfgu, X86CPU),
        VMSTATE_UINT64(env.bndcs_regs.sts, X86CPU),
        VMSTATE_UINT64(env.msr_bndcfgs, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

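/*
 * Hyper-V enlightenment state.  Each block below is migrated only while
 * the corresponding MSRs hold non-zero values (the runtime block also
 * requires the hv-runtime CPU property to be enabled).
 */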
static bool hyperv_hypercall_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_hypercall != 0 || env->msr_hv_guest_os_id != 0;
}

static const VMStateDescription vmstate_msr_hypercall_hypercall = {
    .name = "cpu/msr_hyperv_hypercall",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_hypercall_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU),
        VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_vapic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_vapic != 0;
}

static const VMStateDescription vmstate_msr_hyperv_vapic = {
    .name = "cpu/msr_hyperv_vapic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_vapic_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_vapic, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_time_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_tsc != 0;
}

static const VMStateDescription vmstate_msr_hyperv_time = {
    .name = "cpu/msr_hyperv_time",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_time_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_tsc, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_crash_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < HV_CRASH_PARAMS; i++) {
        if (env->msr_hv_crash_params[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_crash = {
    .name = "cpu/msr_hyperv_crash",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_crash_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params, X86CPU, HV_CRASH_PARAMS),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_runtime_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    if (!cpu->hyperv_runtime) {
        return false;
    }

    return env->msr_hv_runtime != 0;
}

static const VMStateDescription vmstate_msr_hyperv_runtime = {
    .name = "cpu/msr_hyperv_runtime",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_runtime_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_runtime, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_synic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_hv_synic_control != 0 ||
        env->msr_hv_synic_evt_page != 0 ||
        env->msr_hv_synic_msg_page != 0) {
        return true;
    }

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
        if (env->msr_hv_synic_sint[i] != 0) {
            return true;
        }
    }

    return false;
}

static int hyperv_synic_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    hyperv_x86_synic_update(cpu);
    return 0;
}

static const VMStateDescription vmstate_msr_hyperv_synic = {
    .name = "cpu/msr_hyperv_synic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_synic_enable_needed,
    .post_load = hyperv_synic_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU, HV_SINT_COUNT),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_stimer_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_stimer_config); i++) {
        if (env->msr_hv_stimer_config[i] || env->msr_hv_stimer_count[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_stimer = {
    .name = "cpu/msr_hyperv_stimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_stimer_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config, X86CPU,
                             HV_STIMER_COUNT),
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count, X86CPU, HV_STIMER_COUNT),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_reenlightenment_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_reenlightenment_control != 0 ||
        env->msr_hv_tsc_emulation_control != 0 ||
        env->msr_hv_tsc_emulation_status != 0;
}

static const VMStateDescription vmstate_msr_hyperv_reenlightenment = {
    .name = "cpu/msr_hyperv_reenlightenment",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_reenlightenment_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_reenlightenment_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_tsc_emulation_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_tsc_emulation_status, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

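/*
 * AVX-512 state: the opmask registers and the upper halves of the ZMM
 * registers are only migrated when at least one of them is non-zero.
 */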
static bool avx512_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < NB_OPMASK_REGS; i++) {
        if (env->opmask_regs[i]) {
            return true;
        }
    }

    for (i = 0; i < CPU_NB_REGS; i++) {
#define ENV_XMM(reg, field) (env->xmm_regs[reg].ZMM_Q(field))
        if (ENV_XMM(i, 4) || ENV_XMM(i, 6) ||
            ENV_XMM(i, 5) || ENV_XMM(i, 7)) {
            return true;
        }
#ifdef TARGET_X86_64
        if (ENV_XMM(i+16, 0) || ENV_XMM(i+16, 1) ||
            ENV_XMM(i+16, 2) || ENV_XMM(i+16, 3) ||
            ENV_XMM(i+16, 4) || ENV_XMM(i+16, 5) ||
            ENV_XMM(i+16, 6) || ENV_XMM(i+16, 7)) {
            return true;
        }
#endif
    }

    return false;
}

static const VMStateDescription vmstate_avx512 = {
    .name = "cpu/avx512",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = avx512_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS),
        VMSTATE_ZMMH_REGS_VARS(env.xmm_regs, X86CPU, 0),
#ifdef TARGET_X86_64
        VMSTATE_Hi16_ZMM_REGS_VARS(env.xmm_regs, X86CPU, 16),
#endif
        VMSTATE_END_OF_LIST()
    }
};

static bool xss_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->xss != 0;
}

static const VMStateDescription vmstate_xss = {
    .name = "cpu/xss",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xss_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.xss, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

#ifdef TARGET_X86_64
static bool pkru_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->pkru != 0;
}

static const VMStateDescription vmstate_pkru = {
    .name = "cpu/pkru",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pkru_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT32(env.pkru, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
#endif

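/*
 * tsc_khz is only put on the wire when the machine type sets
 * save_tsc_khz in its PCMachineClass and a TSC frequency is known.
 */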
static bool tsc_khz_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
    PCMachineClass *pcmc = PC_MACHINE_CLASS(mc);
    return env->tsc_khz && pcmc->save_tsc_khz;
}

static const VMStateDescription vmstate_tsc_khz = {
    .name = "cpu/tsc_khz",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_khz_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(env.tsc_khz, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool mcg_ext_ctl_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    return cpu->enable_lmce && env->mcg_ext_ctl;
}

static const VMStateDescription vmstate_mcg_ext_ctl = {
    .name = "cpu/mcg_ext_ctl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mcg_ext_ctl_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.mcg_ext_ctl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool spec_ctrl_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->spec_ctrl != 0;
}

static const VMStateDescription vmstate_spec_ctrl = {
    .name = "cpu/spec_ctrl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spec_ctrl_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT64(env.spec_ctrl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool intel_pt_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_rtit_ctrl || env->msr_rtit_status ||
        env->msr_rtit_output_base || env->msr_rtit_output_mask ||
        env->msr_rtit_cr3_match) {
        return true;
    }

    for (i = 0; i < MAX_RTIT_ADDRS; i++) {
        if (env->msr_rtit_addrs[i]) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_intel_pt = {
    .name = "cpu/intel_pt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = intel_pt_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_rtit_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_status, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_output_base, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_output_mask, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_cr3_match, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_rtit_addrs, X86CPU, MAX_RTIT_ADDRS),
        VMSTATE_END_OF_LIST()
    }
};

static bool virt_ssbd_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->virt_ssbd != 0;
}

static const VMStateDescription vmstate_msr_virt_ssbd = {
    .name = "cpu/virt_ssbd",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = virt_ssbd_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT64(env.virt_ssbd, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool svm_npt_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return !!(env->hflags2 & HF2_NPT_MASK);
}

static const VMStateDescription vmstate_svm_npt = {
    .name = "cpu/svn_npt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = svm_npt_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT64(env.nested_cr3, X86CPU),
        VMSTATE_UINT32(env.nested_pg_mode, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

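/*
 * Top-level CPU VMState.  Fields declared with a _V() variant are only
 * present from the given stream version onwards; everything optional
 * beyond that lives in the subsections listed at the end.
 */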
VMStateDescription vmstate_x86_cpu = {
    .name = "cpu",
    .version_id = 12,
    .minimum_version_id = 11,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.regs, X86CPU, CPU_NB_REGS),
        VMSTATE_UINTTL(env.eip, X86CPU),
        VMSTATE_UINTTL(env.eflags, X86CPU),
        VMSTATE_UINT32(env.hflags, X86CPU),
        /* FPU */
        VMSTATE_UINT16(env.fpuc, X86CPU),
        VMSTATE_UINT16(env.fpus_vmstate, X86CPU),
        VMSTATE_UINT16(env.fptag_vmstate, X86CPU),
        VMSTATE_UINT16(env.fpregs_format_vmstate, X86CPU),

        VMSTATE_STRUCT_ARRAY(env.fpregs, X86CPU, 8, 0, vmstate_fpreg, FPReg),

        VMSTATE_SEGMENT_ARRAY(env.segs, X86CPU, 6),
        VMSTATE_SEGMENT(env.ldt, X86CPU),
        VMSTATE_SEGMENT(env.tr, X86CPU),
        VMSTATE_SEGMENT(env.gdt, X86CPU),
        VMSTATE_SEGMENT(env.idt, X86CPU),

        VMSTATE_UINT32(env.sysenter_cs, X86CPU),
        VMSTATE_UINTTL(env.sysenter_esp, X86CPU),
        VMSTATE_UINTTL(env.sysenter_eip, X86CPU),

        VMSTATE_UINTTL(env.cr[0], X86CPU),
        VMSTATE_UINTTL(env.cr[2], X86CPU),
        VMSTATE_UINTTL(env.cr[3], X86CPU),
        VMSTATE_UINTTL(env.cr[4], X86CPU),
        VMSTATE_UINTTL_ARRAY(env.dr, X86CPU, 8),
        /* MMU */
        VMSTATE_INT32(env.a20_mask, X86CPU),
        /* XMM */
        VMSTATE_UINT32(env.mxcsr, X86CPU),
        VMSTATE_XMM_REGS(env.xmm_regs, X86CPU, 0),

#ifdef TARGET_X86_64
        VMSTATE_UINT64(env.efer, X86CPU),
        VMSTATE_UINT64(env.star, X86CPU),
        VMSTATE_UINT64(env.lstar, X86CPU),
        VMSTATE_UINT64(env.cstar, X86CPU),
        VMSTATE_UINT64(env.fmask, X86CPU),
        VMSTATE_UINT64(env.kernelgsbase, X86CPU),
#endif
        VMSTATE_UINT32(env.smbase, X86CPU),

        VMSTATE_UINT64(env.pat, X86CPU),
        VMSTATE_UINT32(env.hflags2, X86CPU),

        VMSTATE_UINT64(env.vm_hsave, X86CPU),
        VMSTATE_UINT64(env.vm_vmcb, X86CPU),
        VMSTATE_UINT64(env.tsc_offset, X86CPU),
        VMSTATE_UINT64(env.intercept, X86CPU),
        VMSTATE_UINT16(env.intercept_cr_read, X86CPU),
        VMSTATE_UINT16(env.intercept_cr_write, X86CPU),
        VMSTATE_UINT16(env.intercept_dr_read, X86CPU),
        VMSTATE_UINT16(env.intercept_dr_write, X86CPU),
        VMSTATE_UINT32(env.intercept_exceptions, X86CPU),
        VMSTATE_UINT8(env.v_tpr, X86CPU),
        /* MTRRs */
        VMSTATE_UINT64_ARRAY(env.mtrr_fixed, X86CPU, 11),
        VMSTATE_UINT64(env.mtrr_deftype, X86CPU),
        VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8),
        /* KVM-related states */
        VMSTATE_INT32(env.interrupt_injected, X86CPU),
        VMSTATE_UINT32(env.mp_state, X86CPU),
        VMSTATE_UINT64(env.tsc, X86CPU),
        VMSTATE_INT32(env.exception_injected, X86CPU),
        VMSTATE_UINT8(env.soft_interrupt, X86CPU),
        VMSTATE_UINT8(env.nmi_injected, X86CPU),
        VMSTATE_UINT8(env.nmi_pending, X86CPU),
        VMSTATE_UINT8(env.has_error_code, X86CPU),
        VMSTATE_UINT32(env.sipi_vector, X86CPU),
        /* MCE */
        VMSTATE_UINT64(env.mcg_cap, X86CPU),
        VMSTATE_UINT64(env.mcg_status, X86CPU),
        VMSTATE_UINT64(env.mcg_ctl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.mce_banks, X86CPU, MCE_BANKS_DEF * 4),
        /* rdtscp */
        VMSTATE_UINT64(env.tsc_aux, X86CPU),
        /* KVM pvclock msr */
        VMSTATE_UINT64(env.system_time_msr, X86CPU),
        VMSTATE_UINT64(env.wall_clock_msr, X86CPU),
        /* XSAVE related fields */
        VMSTATE_UINT64_V(env.xcr0, X86CPU, 12),
        VMSTATE_UINT64_V(env.xstate_bv, X86CPU, 12),
        VMSTATE_YMMH_REGS_VARS(env.xmm_regs, X86CPU, 0, 12),
        VMSTATE_END_OF_LIST()
        /* The above list is not sorted w.r.t. version numbers, watch out! */
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_async_pf_msr,
        &vmstate_pv_eoi_msr,
        &vmstate_steal_time_msr,
        &vmstate_fpop_ip_dp,
        &vmstate_msr_tsc_adjust,
        &vmstate_msr_tscdeadline,
        &vmstate_msr_ia32_misc_enable,
        &vmstate_msr_ia32_feature_control,
        &vmstate_msr_architectural_pmu,
        &vmstate_mpx,
        &vmstate_msr_hypercall_hypercall,
        &vmstate_msr_hyperv_vapic,
        &vmstate_msr_hyperv_time,
        &vmstate_msr_hyperv_crash,
        &vmstate_msr_hyperv_runtime,
        &vmstate_msr_hyperv_synic,
        &vmstate_msr_hyperv_stimer,
        &vmstate_msr_hyperv_reenlightenment,
        &vmstate_avx512,
        &vmstate_xss,
        &vmstate_tsc_khz,
        &vmstate_msr_smi_count,
#ifdef TARGET_X86_64
        &vmstate_pkru,
#endif
        &vmstate_spec_ctrl,
        &vmstate_mcg_ext_ctl,
        &vmstate_msr_intel_pt,
        &vmstate_msr_virt_ssbd,
        &vmstate_svm_npt,
        NULL
    }
};