qemu/target/i386/machine.c
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "hw/hw.h"
#include "hw/boards.h"
#include "hw/i386/pc.h"
#include "hw/isa/isa.h"
#include "migration/cpu.h"

#include "sysemu/kvm.h"

#include "qemu/error-report.h"

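/* Sub-state for one segment register: selector, base, limit and
 * descriptor flags as cached in SegmentCache.
 */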
static const VMStateDescription vmstate_segment = {
    .name = "segment",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(selector, SegmentCache),
        VMSTATE_UINTTL(base, SegmentCache),
        VMSTATE_UINT32(limit, SegmentCache),
        VMSTATE_UINT32(flags, SegmentCache),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_SEGMENT(_field, _state) {                            \
    .name       = (stringify(_field)),                               \
    .size       = sizeof(SegmentCache),                              \
    .vmsd       = &vmstate_segment,                                  \
    .flags      = VMS_STRUCT,                                        \
    .offset     = offsetof(_state, _field)                           \
            + type_check(SegmentCache,typeof_field(_state, _field))  \
}

#define VMSTATE_SEGMENT_ARRAY(_field, _state, _n)                    \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_segment, SegmentCache)

static const VMStateDescription vmstate_xmm_reg = {
    .name = "xmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_XMM_REGS(_field, _state, _start)                         \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_xmm_reg, ZMMReg)

/* YMMH format is the same as XMM, but for bits 128-255 */
static const VMStateDescription vmstate_ymmh_reg = {
    .name = "ymmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_YMMH_REGS_VARS(_field, _state, _start, _v)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, _v,    \
                             vmstate_ymmh_reg, ZMMReg)

static const VMStateDescription vmstate_zmmh_reg = {
    .name = "zmmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_ZMMH_REGS_VARS(_field, _state, _start)                   \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_zmmh_reg, ZMMReg)

#ifdef TARGET_X86_64
static const VMStateDescription vmstate_hi16_zmm_reg = {
    .name = "hi16_zmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_Hi16_ZMM_REGS_VARS(_field, _state, _start)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_hi16_zmm_reg, ZMMReg)
#endif

static const VMStateDescription vmstate_bnd_regs = {
    .name = "bnd_regs",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(lb, BNDReg),
        VMSTATE_UINT64(ub, BNDReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_BND_REGS(_field, _state, _n)          \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_bnd_regs, BNDReg)

static const VMStateDescription vmstate_mtrr_var = {
    .name = "mtrr_var",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, MTRRVar),
        VMSTATE_UINT64(mask, MTRRVar),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_MTRR_VARS(_field, _state, _n, _v)                    \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar)

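/* floatx80 FPU registers are not migrated directly; each one is split
 * into a 64-bit mantissa and a 16-bit sign/exponent word via the
 * temporary struct below (see VMSTATE_WITH_TMP in vmstate_fpreg).
 */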
typedef struct x86_FPReg_tmp {
    FPReg *parent;
    uint64_t tmp_mant;
    uint16_t tmp_exp;
} x86_FPReg_tmp;

static void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
{
    CPU_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

static floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}

static int fpreg_pre_save(void *opaque)
{
    x86_FPReg_tmp *tmp = opaque;

    /* We save the real CPU data (in case of MMX usage, only 'mant'
     * contains the MMX register).
     */
    cpu_get_fp80(&tmp->tmp_mant, &tmp->tmp_exp, tmp->parent->d);

    return 0;
}

static int fpreg_post_load(void *opaque, int version)
{
    x86_FPReg_tmp *tmp = opaque;

    tmp->parent->d = cpu_set_fp80(tmp->tmp_mant, tmp->tmp_exp);
    return 0;
}

static const VMStateDescription vmstate_fpreg_tmp = {
    .name = "fpreg_tmp",
    .post_load = fpreg_post_load,
    .pre_save  = fpreg_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(tmp_mant, x86_FPReg_tmp),
        VMSTATE_UINT16(tmp_exp, x86_FPReg_tmp),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_fpreg = {
    .name = "fpreg",
    .fields = (VMStateField[]) {
        VMSTATE_WITH_TMP(FPReg, x86_FPReg_tmp, vmstate_fpreg_tmp),
        VMSTATE_END_OF_LIST()
    }
};

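/* Convert runtime FPU state into its migration-friendly form (packed
 * status and tag words) and sanitize real-mode segment DPLs before the
 * CPU state is written out.
 */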
static int cpu_pre_save(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    /* FPU */
    env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    env->fptag_vmstate = 0;
    for (i = 0; i < 8; i++) {
        env->fptag_vmstate |= ((!env->fptags[i]) << i);
    }

    env->fpregs_format_vmstate = 0;

    /*
     * Real mode guest segment registers' DPL should be zero.
     * Older KVM versions were setting it wrongly.
     * Fixing it will allow live migration to hosts with unrestricted guest
     * support (otherwise the migration will fail with an invalid guest
     * state error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

    return 0;
}

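/* Rebuild runtime state (FPU tags, CPL, debug registers, TLB) from the
 * freshly loaded fields and reject streams that are inconsistent with
 * the current configuration.
 */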
static int cpu_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int i;

    if (env->tsc_khz && env->user_tsc_khz &&
        env->tsc_khz != env->user_tsc_khz) {
        error_report("Mismatch between user-specified TSC frequency and "
                     "migrated TSC frequency");
        return -EINVAL;
    }

    if (env->fpregs_format_vmstate) {
        error_report("Unsupported old non-softfloat CPU state");
        return -EINVAL;
    }
    /*
     * Real mode guest segment registers' DPL should be zero.
     * Older KVM versions were setting it wrongly.
     * Fixing it will allow live migration from hosts that don't have
     * unrestricted guest support to hosts with unrestricted guest support
     * (otherwise the migration will fail with an invalid guest state
     * error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

    /* Older versions of QEMU incorrectly used CS.DPL as the CPL when
     * running under KVM.  This is wrong for conforming code segments.
     * Luckily, in our implementation the CPL field of hflags is redundant
     * and we can get the right value from the SS descriptor privilege level.
     */
    env->hflags &= ~HF_CPL_MASK;
    env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;

    env->fpstt = (env->fpus_vmstate >> 11) & 7;
    env->fpus = env->fpus_vmstate & ~0x3800;
    env->fptag_vmstate ^= 0xff;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = (env->fptag_vmstate >> i) & 1;
    }
    if (tcg_enabled()) {
        target_ulong dr7;
        update_fp_status(env);
        update_mxcsr_status(env);

        cpu_breakpoint_remove_all(cs, BP_CPU);
        cpu_watchpoint_remove_all(cs, BP_CPU);

        /* Indicate all breakpoints disabled, as they are, then
           let the helper re-enable them.  */
        dr7 = env->dr[7];
        env->dr[7] = dr7 & ~(DR7_GLOBAL_BP_MASK | DR7_LOCAL_BP_MASK);
        cpu_x86_update_dr7(env, dr7);
    }
    tlb_flush(cs);
    return 0;
}

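/* Optional subsections follow.  Each one is only put on the wire when
 * its .needed callback returns true, i.e. when the corresponding state
 * is non-default, so migration to an older QEMU keeps working as long
 * as the feature is unused.
 */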
static bool async_pf_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.async_pf_en_msr != 0;
}

static bool pv_eoi_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.pv_eoi_en_msr != 0;
}

static bool steal_time_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.steal_time_msr != 0;
}

static const VMStateDescription vmstate_steal_time_msr = {
    .name = "cpu/steal_time_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = steal_time_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.steal_time_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_async_pf_msr = {
    .name = "cpu/async_pf_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = async_pf_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.async_pf_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_pv_eoi_msr = {
    .name = "cpu/async_pv_eoi_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pv_eoi_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.pv_eoi_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool fpop_ip_dp_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->fpop != 0 || env->fpip != 0 || env->fpdp != 0;
}

static const VMStateDescription vmstate_fpop_ip_dp = {
    .name = "cpu/fpop_ip_dp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpop_ip_dp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(env.fpop, X86CPU),
        VMSTATE_UINT64(env.fpip, X86CPU),
        VMSTATE_UINT64(env.fpdp, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tsc_adjust_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_adjust != 0;
}

static const VMStateDescription vmstate_msr_tsc_adjust = {
    .name = "cpu/msr_tsc_adjust",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_adjust_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_adjust, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool msr_smi_count_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return cpu->migrate_smi_count && env->msr_smi_count != 0;
}

static const VMStateDescription vmstate_msr_smi_count = {
    .name = "cpu/msr_smi_count",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = msr_smi_count_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_smi_count, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tscdeadline_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_deadline != 0;
}

static const VMStateDescription vmstate_msr_tscdeadline = {
    .name = "cpu/msr_tscdeadline",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tscdeadline_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_deadline, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool misc_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_misc_enable != MSR_IA32_MISC_ENABLE_DEFAULT;
}

static bool feature_control_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_feature_control != 0;
}

static const VMStateDescription vmstate_msr_ia32_misc_enable = {
    .name = "cpu/msr_ia32_misc_enable",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = misc_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_misc_enable, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_msr_ia32_feature_control = {
    .name = "cpu/msr_ia32_feature_control",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = feature_control_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmu_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_fixed_ctr_ctrl || env->msr_global_ctrl ||
        env->msr_global_status || env->msr_global_ovf_ctrl) {
        return true;
    }
    for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
        if (env->msr_fixed_counters[i]) {
            return true;
        }
    }
    for (i = 0; i < MAX_GP_COUNTERS; i++) {
        if (env->msr_gp_counters[i] || env->msr_gp_evtsel[i]) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_architectural_pmu = {
    .name = "cpu/msr_architectural_pmu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmu_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_status, X86CPU),
        VMSTATE_UINT64(env.msr_global_ovf_ctrl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_fixed_counters, X86CPU, MAX_FIXED_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_counters, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_evtsel, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_END_OF_LIST()
    }
};

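/* MPX bound registers, BNDCSR and the BNDCFGS MSR. */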
static bool mpx_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < 4; i++) {
        if (env->bnd_regs[i].lb || env->bnd_regs[i].ub) {
            return true;
        }
    }

    if (env->bndcs_regs.cfgu || env->bndcs_regs.sts) {
        return true;
    }

    return !!env->msr_bndcfgs;
}

static const VMStateDescription vmstate_mpx = {
    .name = "cpu/mpx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mpx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BND_REGS(env.bnd_regs, X86CPU, 4),
        VMSTATE_UINT64(env.bndcs_regs.cfgu, X86CPU),
        VMSTATE_UINT64(env.bndcs_regs.sts, X86CPU),
        VMSTATE_UINT64(env.msr_bndcfgs, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

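/* Hyper-V enlightenment MSRs, one optional subsection per feature. */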
static bool hyperv_hypercall_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_hypercall != 0 || env->msr_hv_guest_os_id != 0;
}

static const VMStateDescription vmstate_msr_hypercall_hypercall = {
    .name = "cpu/msr_hyperv_hypercall",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_hypercall_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU),
        VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_vapic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_vapic != 0;
}

static const VMStateDescription vmstate_msr_hyperv_vapic = {
    .name = "cpu/msr_hyperv_vapic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_vapic_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_vapic, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_time_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_tsc != 0;
}

static const VMStateDescription vmstate_msr_hyperv_time = {
    .name = "cpu/msr_hyperv_time",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_time_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_tsc, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_crash_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < HV_CRASH_PARAMS; i++) {
        if (env->msr_hv_crash_params[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_crash = {
    .name = "cpu/msr_hyperv_crash",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_crash_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params, X86CPU, HV_CRASH_PARAMS),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_runtime_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    if (!cpu->hyperv_runtime) {
        return false;
    }

    return env->msr_hv_runtime != 0;
}

static const VMStateDescription vmstate_msr_hyperv_runtime = {
    .name = "cpu/msr_hyperv_runtime",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_runtime_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_runtime, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_synic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_hv_synic_control != 0 ||
        env->msr_hv_synic_evt_page != 0 ||
        env->msr_hv_synic_msg_page != 0) {
        return true;
    }

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
        if (env->msr_hv_synic_sint[i] != 0) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_hyperv_synic = {
    .name = "cpu/msr_hyperv_synic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_synic_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU, HV_SINT_COUNT),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_stimer_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_stimer_config); i++) {
        if (env->msr_hv_stimer_config[i] || env->msr_hv_stimer_count[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_stimer = {
    .name = "cpu/msr_hyperv_stimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_stimer_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config, X86CPU,
                             HV_STIMER_COUNT),
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count, X86CPU, HV_STIMER_COUNT),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_reenlightenment_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_reenlightenment_control != 0 ||
        env->msr_hv_tsc_emulation_control != 0 ||
        env->msr_hv_tsc_emulation_status != 0;
}

static const VMStateDescription vmstate_msr_hyperv_reenlightenment = {
    .name = "cpu/msr_hyperv_reenlightenment",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_reenlightenment_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_reenlightenment_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_tsc_emulation_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_tsc_emulation_status, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool avx512_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < NB_OPMASK_REGS; i++) {
        if (env->opmask_regs[i]) {
            return true;
        }
    }

    for (i = 0; i < CPU_NB_REGS; i++) {
#define ENV_XMM(reg, field) (env->xmm_regs[reg].ZMM_Q(field))
        if (ENV_XMM(i, 4) || ENV_XMM(i, 6) ||
            ENV_XMM(i, 5) || ENV_XMM(i, 7)) {
            return true;
        }
#ifdef TARGET_X86_64
        if (ENV_XMM(i+16, 0) || ENV_XMM(i+16, 1) ||
            ENV_XMM(i+16, 2) || ENV_XMM(i+16, 3) ||
            ENV_XMM(i+16, 4) || ENV_XMM(i+16, 5) ||
            ENV_XMM(i+16, 6) || ENV_XMM(i+16, 7)) {
            return true;
        }
#endif
    }

    return false;
}

static const VMStateDescription vmstate_avx512 = {
    .name = "cpu/avx512",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = avx512_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS),
        VMSTATE_ZMMH_REGS_VARS(env.xmm_regs, X86CPU, 0),
#ifdef TARGET_X86_64
        VMSTATE_Hi16_ZMM_REGS_VARS(env.xmm_regs, X86CPU, 16),
#endif
        VMSTATE_END_OF_LIST()
    }
};

static bool xss_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->xss != 0;
}

static const VMStateDescription vmstate_xss = {
    .name = "cpu/xss",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xss_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.xss, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

#ifdef TARGET_X86_64
static bool pkru_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->pkru != 0;
}

static const VMStateDescription vmstate_pkru = {
    .name = "cpu/pkru",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pkru_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT32(env.pkru, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
#endif

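/* The TSC frequency is only migrated when the machine type enables
 * save_tsc_khz, keeping the stream compatible for older machine types.
 */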
static bool tsc_khz_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
    PCMachineClass *pcmc = PC_MACHINE_CLASS(mc);
    return env->tsc_khz && pcmc->save_tsc_khz;
}

static const VMStateDescription vmstate_tsc_khz = {
    .name = "cpu/tsc_khz",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_khz_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(env.tsc_khz, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool mcg_ext_ctl_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    return cpu->enable_lmce && env->mcg_ext_ctl;
}

static const VMStateDescription vmstate_mcg_ext_ctl = {
    .name = "cpu/mcg_ext_ctl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mcg_ext_ctl_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.mcg_ext_ctl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool spec_ctrl_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->spec_ctrl != 0;
}

static const VMStateDescription vmstate_spec_ctrl = {
    .name = "cpu/spec_ctrl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spec_ctrl_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT64(env.spec_ctrl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool intel_pt_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_rtit_ctrl || env->msr_rtit_status ||
        env->msr_rtit_output_base || env->msr_rtit_output_mask ||
        env->msr_rtit_cr3_match) {
        return true;
    }

    for (i = 0; i < MAX_RTIT_ADDRS; i++) {
        if (env->msr_rtit_addrs[i]) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_intel_pt = {
    .name = "cpu/intel_pt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = intel_pt_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_rtit_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_status, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_output_base, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_output_mask, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_cr3_match, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_rtit_addrs, X86CPU, MAX_RTIT_ADDRS),
        VMSTATE_END_OF_LIST()
    }
};

static bool virt_ssbd_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->virt_ssbd != 0;
}

static const VMStateDescription vmstate_msr_virt_ssbd = {
    .name = "cpu/virt_ssbd",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = virt_ssbd_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT64(env.virt_ssbd, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool svm_npt_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return !!(env->hflags2 & HF2_NPT_MASK);
}

static const VMStateDescription vmstate_svm_npt = {
    .name = "cpu/svn_npt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = svm_npt_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT64(env.nested_cr3, X86CPU),
        VMSTATE_UINT32(env.nested_pg_mode, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

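/* Top-level x86 CPU vmstate: mandatory fields first, then the optional
 * subsections declared above.
 */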
VMStateDescription vmstate_x86_cpu = {
    .name = "cpu",
    .version_id = 12,
    .minimum_version_id = 11,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.regs, X86CPU, CPU_NB_REGS),
        VMSTATE_UINTTL(env.eip, X86CPU),
        VMSTATE_UINTTL(env.eflags, X86CPU),
        VMSTATE_UINT32(env.hflags, X86CPU),
        /* FPU */
        VMSTATE_UINT16(env.fpuc, X86CPU),
        VMSTATE_UINT16(env.fpus_vmstate, X86CPU),
        VMSTATE_UINT16(env.fptag_vmstate, X86CPU),
        VMSTATE_UINT16(env.fpregs_format_vmstate, X86CPU),

        VMSTATE_STRUCT_ARRAY(env.fpregs, X86CPU, 8, 0, vmstate_fpreg, FPReg),

        VMSTATE_SEGMENT_ARRAY(env.segs, X86CPU, 6),
        VMSTATE_SEGMENT(env.ldt, X86CPU),
        VMSTATE_SEGMENT(env.tr, X86CPU),
        VMSTATE_SEGMENT(env.gdt, X86CPU),
        VMSTATE_SEGMENT(env.idt, X86CPU),

        VMSTATE_UINT32(env.sysenter_cs, X86CPU),
        VMSTATE_UINTTL(env.sysenter_esp, X86CPU),
        VMSTATE_UINTTL(env.sysenter_eip, X86CPU),

        VMSTATE_UINTTL(env.cr[0], X86CPU),
        VMSTATE_UINTTL(env.cr[2], X86CPU),
        VMSTATE_UINTTL(env.cr[3], X86CPU),
        VMSTATE_UINTTL(env.cr[4], X86CPU),
        VMSTATE_UINTTL_ARRAY(env.dr, X86CPU, 8),
        /* MMU */
        VMSTATE_INT32(env.a20_mask, X86CPU),
        /* XMM */
        VMSTATE_UINT32(env.mxcsr, X86CPU),
        VMSTATE_XMM_REGS(env.xmm_regs, X86CPU, 0),

#ifdef TARGET_X86_64
        VMSTATE_UINT64(env.efer, X86CPU),
        VMSTATE_UINT64(env.star, X86CPU),
        VMSTATE_UINT64(env.lstar, X86CPU),
        VMSTATE_UINT64(env.cstar, X86CPU),
        VMSTATE_UINT64(env.fmask, X86CPU),
        VMSTATE_UINT64(env.kernelgsbase, X86CPU),
#endif
        VMSTATE_UINT32(env.smbase, X86CPU),

        VMSTATE_UINT64(env.pat, X86CPU),
        VMSTATE_UINT32(env.hflags2, X86CPU),

        VMSTATE_UINT64(env.vm_hsave, X86CPU),
        VMSTATE_UINT64(env.vm_vmcb, X86CPU),
        VMSTATE_UINT64(env.tsc_offset, X86CPU),
        VMSTATE_UINT64(env.intercept, X86CPU),
        VMSTATE_UINT16(env.intercept_cr_read, X86CPU),
        VMSTATE_UINT16(env.intercept_cr_write, X86CPU),
        VMSTATE_UINT16(env.intercept_dr_read, X86CPU),
        VMSTATE_UINT16(env.intercept_dr_write, X86CPU),
        VMSTATE_UINT32(env.intercept_exceptions, X86CPU),
        VMSTATE_UINT8(env.v_tpr, X86CPU),
        /* MTRRs */
        VMSTATE_UINT64_ARRAY(env.mtrr_fixed, X86CPU, 11),
        VMSTATE_UINT64(env.mtrr_deftype, X86CPU),
        VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8),
        /* KVM-related states */
        VMSTATE_INT32(env.interrupt_injected, X86CPU),
        VMSTATE_UINT32(env.mp_state, X86CPU),
        VMSTATE_UINT64(env.tsc, X86CPU),
        VMSTATE_INT32(env.exception_injected, X86CPU),
        VMSTATE_UINT8(env.soft_interrupt, X86CPU),
        VMSTATE_UINT8(env.nmi_injected, X86CPU),
        VMSTATE_UINT8(env.nmi_pending, X86CPU),
        VMSTATE_UINT8(env.has_error_code, X86CPU),
        VMSTATE_UINT32(env.sipi_vector, X86CPU),
        /* MCE */
        VMSTATE_UINT64(env.mcg_cap, X86CPU),
        VMSTATE_UINT64(env.mcg_status, X86CPU),
        VMSTATE_UINT64(env.mcg_ctl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.mce_banks, X86CPU, MCE_BANKS_DEF * 4),
        /* rdtscp */
        VMSTATE_UINT64(env.tsc_aux, X86CPU),
        /* KVM pvclock msr */
        VMSTATE_UINT64(env.system_time_msr, X86CPU),
        VMSTATE_UINT64(env.wall_clock_msr, X86CPU),
        /* XSAVE related fields */
        VMSTATE_UINT64_V(env.xcr0, X86CPU, 12),
        VMSTATE_UINT64_V(env.xstate_bv, X86CPU, 12),
        VMSTATE_YMMH_REGS_VARS(env.xmm_regs, X86CPU, 0, 12),
        VMSTATE_END_OF_LIST()
        /* The above list is not sorted w.r.t. version numbers, watch out! */
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_async_pf_msr,
        &vmstate_pv_eoi_msr,
        &vmstate_steal_time_msr,
        &vmstate_fpop_ip_dp,
        &vmstate_msr_tsc_adjust,
        &vmstate_msr_tscdeadline,
        &vmstate_msr_ia32_misc_enable,
        &vmstate_msr_ia32_feature_control,
        &vmstate_msr_architectural_pmu,
        &vmstate_mpx,
        &vmstate_msr_hypercall_hypercall,
        &vmstate_msr_hyperv_vapic,
        &vmstate_msr_hyperv_time,
        &vmstate_msr_hyperv_crash,
        &vmstate_msr_hyperv_runtime,
        &vmstate_msr_hyperv_synic,
        &vmstate_msr_hyperv_stimer,
        &vmstate_msr_hyperv_reenlightenment,
        &vmstate_avx512,
        &vmstate_xss,
        &vmstate_tsc_khz,
        &vmstate_msr_smi_count,
#ifdef TARGET_X86_64
        &vmstate_pkru,
#endif
        &vmstate_spec_ctrl,
        &vmstate_mcg_ext_ctl,
        &vmstate_msr_intel_pt,
        &vmstate_msr_virt_ssbd,
        &vmstate_svm_npt,
        NULL
    }
};