/*
 * QEMU x86 CPU state save/load (migration) — target-i386/machine.c
 */
   1#include "qemu/osdep.h"
   2#include "qemu-common.h"
   3#include "cpu.h"
   4#include "exec/exec-all.h"
   5#include "hw/hw.h"
   6#include "hw/boards.h"
   7#include "hw/i386/pc.h"
   8#include "hw/isa/isa.h"
   9#include "migration/cpu.h"
  10#include "exec/exec-all.h"
  11
  12#include "cpu.h"
  13#include "exec/exec-all.h"
  14#include "sysemu/kvm.h"
  15
  16#include "qemu/error-report.h"
  17
/* Wire layout of one segment register / descriptor-table cache:
 * the selector plus the cached base/limit/flags from the descriptor. */
static const VMStateDescription vmstate_segment = {
    .name = "segment",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(selector, SegmentCache),
        VMSTATE_UINTTL(base, SegmentCache),   /* target_ulong: 32 or 64 bit */
        VMSTATE_UINT32(limit, SegmentCache),
        VMSTATE_UINT32(flags, SegmentCache),
        VMSTATE_END_OF_LIST()
    }
};

/* Embed one SegmentCache member of _state as a VMS_STRUCT field;
 * type_check() turns a wrongly-typed _field into a compile error. */
#define VMSTATE_SEGMENT(_field, _state) {                            \
    .name       = (stringify(_field)),                               \
    .size       = sizeof(SegmentCache),                              \
    .vmsd       = &vmstate_segment,                                  \
    .flags      = VMS_STRUCT,                                        \
    .offset     = offsetof(_state, _field)                           \
            + type_check(SegmentCache,typeof_field(_state, _field))  \
}

/* Fixed-size array of segment registers (e.g. env.segs[6]). */
#define VMSTATE_SEGMENT_ARRAY(_field, _state, _n)                    \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_segment, SegmentCache)
  42
/* Bits 0-127 of a vector register: the legacy XMM (SSE) part. */
static const VMStateDescription vmstate_xmm_reg = {
    .name = "xmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

/* The XMM halves of CPU_NB_REGS vector registers, starting at _start. */
#define VMSTATE_XMM_REGS(_field, _state, _start)                         \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_xmm_reg, ZMMReg)
  57
/* YMMH format is the same as XMM, but for bits 128-255 */
static const VMStateDescription vmstate_ymmh_reg = {
    .name = "ymmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

/* YMM high halves of CPU_NB_REGS registers; _v is the minimum stream
 * version in which this field appears. */
#define VMSTATE_YMMH_REGS_VARS(_field, _state, _start, _v)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, _v,    \
                             vmstate_ymmh_reg, ZMMReg)
  73
/* Bits 256-511 of a vector register: the ZMM-only upper quadwords. */
static const VMStateDescription vmstate_zmmh_reg = {
    .name = "zmmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

/* ZMM high halves of CPU_NB_REGS registers, starting at _start. */
#define VMSTATE_ZMMH_REGS_VARS(_field, _state, _start)                   \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_zmmh_reg, ZMMReg)
  90
#ifdef TARGET_X86_64
/* Full 512-bit contents of ZMM16-ZMM31, which exist only in 64-bit mode. */
static const VMStateDescription vmstate_hi16_zmm_reg = {
    .name = "hi16_zmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

/* ZMM16..ZMM16+CPU_NB_REGS-1, i.e. the upper register file (_start == 16). */
#define VMSTATE_Hi16_ZMM_REGS_VARS(_field, _state, _start)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_hi16_zmm_reg, ZMMReg)
#endif
 113
/* One MPX bound register: lower and upper bound. */
static const VMStateDescription vmstate_bnd_regs = {
    .name = "bnd_regs",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(lb, BNDReg),
        VMSTATE_UINT64(ub, BNDReg),
        VMSTATE_END_OF_LIST()
    }
};

/* Array of _n MPX bound registers. */
#define VMSTATE_BND_REGS(_field, _state, _n)          \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_bnd_regs, BNDReg)
 127
/* One variable-range MTRR: its base and mask MSR pair. */
static const VMStateDescription vmstate_mtrr_var = {
    .name = "mtrr_var",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, MTRRVar),
        VMSTATE_UINT64(mask, MTRRVar),
        VMSTATE_END_OF_LIST()
    }
};

/* Array of _n variable-range MTRRs, present from stream version _v. */
#define VMSTATE_MTRR_VARS(_field, _state, _n, _v)                    \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar)
 141
 142static void put_fpreg_error(QEMUFile *f, void *opaque, size_t size)
 143{
 144    fprintf(stderr, "call put_fpreg() with invalid arguments\n");
 145    exit(0);
 146}
 147
/* XXX: add that in a FPU generic layer */
/*
 * Raw view of an x87 80-bit register as a 64-bit mantissa plus a 16-bit
 * sign+exponent word, used by the legacy format-1 load paths below.
 * NOTE(review): as a *union* both members share storage at offset 0, yet
 * fp64_to_fp80() and get_fpreg_1_mmx() assign mant and exp as if they were
 * distinct fields -- presumably this is meant to mirror the mant/exp layout
 * of floatx80 (a struct); verify against the floatx80/FPReg definitions.
 */
union x86_longdouble {
    uint64_t mant;
    uint16_t exp;
};

/* Field extractors for an IEEE-754 double held as raw bits in a uint64_t. */
#define MANTD1(fp)      (fp & ((1LL << 52) - 1))  /* 52-bit mantissa */
#define EXPBIAS1 1023                             /* double exponent bias */
#define EXPD1(fp)       ((fp >> 52) & 0x7FF)      /* 11-bit biased exponent */
#define SIGND1(fp)      ((fp >> 32) & 0x80000000) /* sign, kept at bit 31 of the high word */
 158
/*
 * Expand an IEEE-754 double (raw bits in 'temp') into x87 80-bit format:
 * the 52-bit mantissa is shifted up with the explicit integer bit set,
 * and the exponent is rebiased from 1023 to 16383 with the sign moved
 * into bit 15 of the exponent word.
 * NOTE(review): zero/denormal/infinity/NaN inputs are not special-cased,
 * so they do not convert exactly -- TODO confirm this is acceptable for
 * the legacy format-1 load path.
 */
static void fp64_to_fp80(union x86_longdouble *p, uint64_t temp)
{
    int e;
    /* mantissa */
    p->mant = (MANTD1(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD1(temp) - EXPBIAS1 + 16383;
    e |= SIGND1(temp) >> 16;
    p->exp = e;
}
 169
/* Load one x87 register in format 0: 64-bit mantissa + 16-bit sign/exp. */
static int get_fpreg(QEMUFile *f, void *opaque, size_t size)
{
    FPReg *fp_reg = opaque;
    uint64_t mant;
    uint16_t exp;

    qemu_get_be64s(f, &mant);
    qemu_get_be16s(f, &exp);
    fp_reg->d = cpu_set_fp80(mant, exp);
    return 0;
}

/* Save one x87 register in format 0 (the only format ever written out,
 * see cpu_pre_save setting fpregs_format_vmstate = 0). */
static void put_fpreg(QEMUFile *f, void *opaque, size_t size)
{
    FPReg *fp_reg = opaque;
    uint64_t mant;
    uint16_t exp;
    /* we save the real CPU data (in case of MMX usage only 'mant'
       contains the MMX register) */
    cpu_get_fp80(&mant, &exp, fp_reg->d);
    qemu_put_be64s(f, &mant);
    qemu_put_be16s(f, &exp);
}

/* Format 0: full 80-bit save and load. */
static const VMStateInfo vmstate_fpreg = {
    .name = "fpreg",
    .get  = get_fpreg,
    .put  = put_fpreg,
};
 199
/* Load one format-1 register saved while the FPU held MMX data: the
 * 64-bit value is the MMX register itself; exp is forced to all-ones
 * as MMX operations do on real hardware. */
static int get_fpreg_1_mmx(QEMUFile *f, void *opaque, size_t size)
{
    union x86_longdouble *p = opaque;
    uint64_t mant;

    qemu_get_be64s(f, &mant);
    p->mant = mant;
    p->exp = 0xffff;
    return 0;
}

/* Format 1, MMX variant: load-only (put_fpreg_error traps any save). */
static const VMStateInfo vmstate_fpreg_1_mmx = {
    .name = "fpreg_1_mmx",
    .get  = get_fpreg_1_mmx,
    .put  = put_fpreg_error,
};

/* Load one format-1 register saved as a plain IEEE double: expand it
 * back to the 80-bit x87 format. */
static int get_fpreg_1_no_mmx(QEMUFile *f, void *opaque, size_t size)
{
    union x86_longdouble *p = opaque;
    uint64_t mant;

    qemu_get_be64s(f, &mant);
    fp64_to_fp80(p, mant);
    return 0;
}

/* Format 1, non-MMX variant: load-only. */
static const VMStateInfo vmstate_fpreg_1_no_mmx = {
    .name = "fpreg_1_no_mmx",
    .get  = get_fpreg_1_no_mmx,
    .put  = put_fpreg_error,
};
 232
 233static bool fpregs_is_0(void *opaque, int version_id)
 234{
 235    X86CPU *cpu = opaque;
 236    CPUX86State *env = &cpu->env;
 237
 238    return (env->fpregs_format_vmstate == 0);
 239}
 240
 241static bool fpregs_is_1_mmx(void *opaque, int version_id)
 242{
 243    X86CPU *cpu = opaque;
 244    CPUX86State *env = &cpu->env;
 245    int guess_mmx;
 246
 247    guess_mmx = ((env->fptag_vmstate == 0xff) &&
 248                 (env->fpus_vmstate & 0x3800) == 0);
 249    return (guess_mmx && (env->fpregs_format_vmstate == 1));
 250}
 251
 252static bool fpregs_is_1_no_mmx(void *opaque, int version_id)
 253{
 254    X86CPU *cpu = opaque;
 255    CPUX86State *env = &cpu->env;
 256    int guess_mmx;
 257
 258    guess_mmx = ((env->fptag_vmstate == 0xff) &&
 259                 (env->fpus_vmstate & 0x3800) == 0);
 260    return (!guess_mmx && (env->fpregs_format_vmstate == 1));
 261}
 262
/* The three mutually-exclusive layouts of the saved FPU registers; at
 * load time exactly one of the predicates above accepts the stream. */
#define VMSTATE_FP_REGS(_field, _state, _n)                               \
    VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_0, vmstate_fpreg, FPReg), \
    VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_1_mmx, vmstate_fpreg_1_mmx, FPReg), \
    VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_1_no_mmx, vmstate_fpreg_1_no_mmx, FPReg)
 267
 268static bool version_is_5(void *opaque, int version_id)
 269{
 270    return version_id == 5;
 271}
 272
 273#ifdef TARGET_X86_64
 274static bool less_than_7(void *opaque, int version_id)
 275{
 276    return version_id < 7;
 277}
 278
 279static int get_uint64_as_uint32(QEMUFile *f, void *pv, size_t size)
 280{
 281    uint64_t *v = pv;
 282    *v = qemu_get_be32(f);
 283    return 0;
 284}
 285
 286static void put_uint64_as_uint32(QEMUFile *f, void *pv, size_t size)
 287{
 288    uint64_t *v = pv;
 289    qemu_put_be32(f, *v);
 290}
 291
/* Before stream version 7 the sysenter ESP/EIP MSRs were saved as 32 bits
 * even on x86-64; this info reads/writes a uint64_t field as 32 bits. */
static const VMStateInfo vmstate_hack_uint64_as_uint32 = {
    .name = "uint64_as_uint32",
    .get  = get_uint64_as_uint32,
    .put  = put_uint64_as_uint32,
};

/* A uint64_t field carried as 32 bits, gated by a version test _t. */
#define VMSTATE_HACK_UINT32(_f, _s, _t)                                  \
    VMSTATE_SINGLE_TEST(_f, _s, _t, 0, vmstate_hack_uint64_as_uint32, uint64_t)
 300#endif
 301
/*
 * Prepare env for saving: pack the x87 status/tag words into their
 * vmstate shadow fields and sanitize real-mode segment DPLs.
 */
static void cpu_pre_save(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    /* FPU */
    /* Merge the x87 top-of-stack (fpstt) into bits 11-13 of the saved
       status word. */
    env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    /* The saved tag word has a bit *set* for every valid register --
       the inverse of fptags[], where 0 means valid. */
    env->fptag_vmstate = 0;
    for(i = 0; i < 8; i++) {
        env->fptag_vmstate |= ((!env->fptags[i]) << i);
    }

    env->fpregs_format_vmstate = 0;  /* we always write full 80-bit regs */

    /*
     * Real mode guest segments register DPL should be zero.
     * Older KVM version were setting it wrongly.
     * Fixing it will allow live migration to host with unrestricted guest
     * support (otherwise the migration will fail with invalid guest state
     * error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        /* NOTE(review): 'x &= ~(x & MASK)' is equivalent to 'x &= ~MASK'. */
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

}
 335
/*
 * Rebuild derived CPU state after an incoming migration: validate the
 * TSC frequency, sanitize real-mode segment DPLs, re-derive the CPL,
 * unpack the x87 shadow fields, re-arm hardware breakpoints from DR7,
 * and flush the TLB.  Returns 0 on success, -EINVAL on a TSC mismatch.
 */
static int cpu_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int i;

    /* Refuse the stream if the user pinned a TSC frequency on the
       destination that differs from the migrated one. */
    if (env->tsc_khz && env->user_tsc_khz &&
        env->tsc_khz != env->user_tsc_khz) {
        error_report("Mismatch between user-specified TSC frequency and "
                     "migrated TSC frequency");
        return -EINVAL;
    }

    /*
     * Real mode guest segments register DPL should be zero.
     * Older KVM version were setting it wrongly.
     * Fixing it will allow live migration from such host that don't have
     * restricted guest support to a host with unrestricted guest support
     * (otherwise the migration will fail with invalid guest state
     * error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

    /* Older versions of QEMU incorrectly used CS.DPL as the CPL when
     * running under KVM.  This is wrong for conforming code segments.
     * Luckily, in our implementation the CPL field of hflags is redundant
     * and we can get the right value from the SS descriptor privilege level.
     */
    env->hflags &= ~HF_CPL_MASK;
    env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;

    /* Unpack the x87 shadow fields saved by cpu_pre_save: TOP from status
       word bits 11-13, and the inverted tag-word convention. */
    env->fpstt = (env->fpus_vmstate >> 11) & 7;
    env->fpus = env->fpus_vmstate & ~0x3800;
    env->fptag_vmstate ^= 0xff;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = (env->fptag_vmstate >> i) & 1;
    }
    update_fp_status(env);

    cpu_breakpoint_remove_all(cs, BP_CPU);
    cpu_watchpoint_remove_all(cs, BP_CPU);
    {
        /* Indicate all breakpoints disabled, as they are, then
           let the helper re-enable them.  */
        target_ulong dr7 = env->dr[7];
        env->dr[7] = dr7 & ~(DR7_GLOBAL_BP_MASK | DR7_LOCAL_BP_MASK);
        cpu_x86_update_dr7(env, dr7);
    }
    tlb_flush(cs, 1);

    if (tcg_enabled()) {
        cpu_smm_update(cpu);
    }
    return 0;
}
 400
/* Subsection test: the KVM async-page-fault MSR has been enabled. */
static bool async_pf_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.async_pf_en_msr != 0;
}

/* Subsection test: the KVM paravirtual-EOI MSR has been enabled. */
static bool pv_eoi_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.pv_eoi_en_msr != 0;
}

/* Subsection test: the KVM steal-time MSR has been enabled. */
static bool steal_time_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.steal_time_msr != 0;
}

static const VMStateDescription vmstate_steal_time_msr = {
    .name = "cpu/steal_time_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = steal_time_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.steal_time_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_async_pf_msr = {
    .name = "cpu/async_pf_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = async_pf_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.async_pf_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_pv_eoi_msr = {
    /* NOTE(review): the "async_" prefix looks like a copy/paste from the
     * async_pf subsection, but this string is part of the migration wire
     * format and must not be changed. */
    .name = "cpu/async_pv_eoi_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pv_eoi_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.pv_eoi_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
 454
/* Subsection test: any of the x87 opcode / instruction pointer / data
 * pointer registers is nonzero. */
static bool fpop_ip_dp_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->fpop != 0 || env->fpip != 0 || env->fpdp != 0;
}

static const VMStateDescription vmstate_fpop_ip_dp = {
    .name = "cpu/fpop_ip_dp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpop_ip_dp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(env.fpop, X86CPU),
        VMSTATE_UINT64(env.fpip, X86CPU),
        VMSTATE_UINT64(env.fpdp, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
 475
/* Subsection test: IA32_TSC_ADJUST has been written. */
static bool tsc_adjust_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_adjust != 0;
}

static const VMStateDescription vmstate_msr_tsc_adjust = {
    .name = "cpu/msr_tsc_adjust",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_adjust_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_adjust, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

/* Subsection test: the local-APIC TSC deadline timer is armed. */
static bool tscdeadline_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_deadline != 0;
}

static const VMStateDescription vmstate_msr_tscdeadline = {
    .name = "cpu/msr_tscdeadline",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tscdeadline_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_deadline, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
 513
/* Subsection test: IA32_MISC_ENABLE differs from its reset default. */
static bool misc_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_misc_enable != MSR_IA32_MISC_ENABLE_DEFAULT;
}

/* Subsection test: IA32_FEATURE_CONTROL has been written. */
static bool feature_control_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_feature_control != 0;
}

static const VMStateDescription vmstate_msr_ia32_misc_enable = {
    .name = "cpu/msr_ia32_misc_enable",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = misc_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_misc_enable, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_msr_ia32_feature_control = {
    .name = "cpu/msr_ia32_feature_control",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = feature_control_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
 551
 552static bool pmu_enable_needed(void *opaque)
 553{
 554    X86CPU *cpu = opaque;
 555    CPUX86State *env = &cpu->env;
 556    int i;
 557
 558    if (env->msr_fixed_ctr_ctrl || env->msr_global_ctrl ||
 559        env->msr_global_status || env->msr_global_ovf_ctrl) {
 560        return true;
 561    }
 562    for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
 563        if (env->msr_fixed_counters[i]) {
 564            return true;
 565        }
 566    }
 567    for (i = 0; i < MAX_GP_COUNTERS; i++) {
 568        if (env->msr_gp_counters[i] || env->msr_gp_evtsel[i]) {
 569            return true;
 570        }
 571    }
 572
 573    return false;
 574}
 575
/* Architectural performance-monitoring MSRs (global controls plus the
 * fixed and general-purpose counter banks). */
static const VMStateDescription vmstate_msr_architectural_pmu = {
    .name = "cpu/msr_architectural_pmu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmu_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_status, X86CPU),
        VMSTATE_UINT64(env.msr_global_ovf_ctrl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_fixed_counters, X86CPU, MAX_FIXED_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_counters, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_evtsel, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_END_OF_LIST()
    }
};
 592
 593static bool mpx_needed(void *opaque)
 594{
 595    X86CPU *cpu = opaque;
 596    CPUX86State *env = &cpu->env;
 597    unsigned int i;
 598
 599    for (i = 0; i < 4; i++) {
 600        if (env->bnd_regs[i].lb || env->bnd_regs[i].ub) {
 601            return true;
 602        }
 603    }
 604
 605    if (env->bndcs_regs.cfgu || env->bndcs_regs.sts) {
 606        return true;
 607    }
 608
 609    return !!env->msr_bndcfgs;
 610}
 611
/* Memory Protection Extensions state: bound registers, BNDCSR, BNDCFGS. */
static const VMStateDescription vmstate_mpx = {
    .name = "cpu/mpx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mpx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BND_REGS(env.bnd_regs, X86CPU, 4),
        VMSTATE_UINT64(env.bndcs_regs.cfgu, X86CPU),
        VMSTATE_UINT64(env.bndcs_regs.sts, X86CPU),
        VMSTATE_UINT64(env.msr_bndcfgs, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
 625
/* Subsection test: the Hyper-V hypercall page or guest-OS-id MSR is set. */
static bool hyperv_hypercall_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_hypercall != 0 || env->msr_hv_guest_os_id != 0;
}

static const VMStateDescription vmstate_msr_hypercall_hypercall = {
    .name = "cpu/msr_hyperv_hypercall",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_hypercall_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU),
        VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

/* Subsection test: the Hyper-V APIC-assist (VAPIC) MSR is set. */
static bool hyperv_vapic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_vapic != 0;
}

static const VMStateDescription vmstate_msr_hyperv_vapic = {
    .name = "cpu/msr_hyperv_vapic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_vapic_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_vapic, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
 664
/* Subsection test: the Hyper-V reference TSC page MSR is set. */
static bool hyperv_time_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_tsc != 0;
}

static const VMStateDescription vmstate_msr_hyperv_time = {
    .name = "cpu/msr_hyperv_time",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_time_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_tsc, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

/* Subsection test: any Hyper-V guest-crash parameter MSR is nonzero. */
static bool hyperv_crash_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < HV_X64_MSR_CRASH_PARAMS; i++) {
        if (env->msr_hv_crash_params[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_crash = {
    .name = "cpu/msr_hyperv_crash",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_crash_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params,
                             X86CPU, HV_X64_MSR_CRASH_PARAMS),
        VMSTATE_END_OF_LIST()
    }
};
 709
/* Subsection test: the Hyper-V VP runtime MSR is nonzero. */
static bool hyperv_runtime_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_runtime != 0;
}

static const VMStateDescription vmstate_msr_hyperv_runtime = {
    .name = "cpu/msr_hyperv_runtime",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_runtime_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_runtime, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

/* Subsection test: any Hyper-V synthetic-interrupt-controller (SynIC)
 * MSR -- control, event/message pages, or an SINT route -- is set. */
static bool hyperv_synic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_hv_synic_control != 0 ||
        env->msr_hv_synic_evt_page != 0 ||
        env->msr_hv_synic_msg_page != 0) {
        return true;
    }

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
        if (env->msr_hv_synic_sint[i] != 0) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_hyperv_synic = {
    .name = "cpu/msr_hyperv_synic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_synic_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU,
                             HV_SYNIC_SINT_COUNT),
        VMSTATE_END_OF_LIST()
    }
};
 764
/* Subsection test: any Hyper-V synthetic timer is configured or armed. */
static bool hyperv_stimer_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_stimer_config); i++) {
        if (env->msr_hv_stimer_config[i] || env->msr_hv_stimer_count[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_stimer = {
    .name = "cpu/msr_hyperv_stimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_stimer_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config,
                             X86CPU, HV_SYNIC_STIMER_COUNT),
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count,
                             X86CPU, HV_SYNIC_STIMER_COUNT),
        VMSTATE_END_OF_LIST()
    }
};
 792
/* Subsection test: any AVX-512-only state is live -- an opmask register,
 * the upper 256 bits (quadwords 4-7) of ZMM0-15, or (64-bit only) any
 * part of ZMM16-31. */
static bool avx512_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < NB_OPMASK_REGS; i++) {
        if (env->opmask_regs[i]) {
            return true;
        }
    }

    for (i = 0; i < CPU_NB_REGS; i++) {
#define ENV_XMM(reg, field) (env->xmm_regs[reg].ZMM_Q(field))
        if (ENV_XMM(i, 4) || ENV_XMM(i, 6) ||
            ENV_XMM(i, 5) || ENV_XMM(i, 7)) {
            return true;
        }
#ifdef TARGET_X86_64
        if (ENV_XMM(i+16, 0) || ENV_XMM(i+16, 1) ||
            ENV_XMM(i+16, 2) || ENV_XMM(i+16, 3) ||
            ENV_XMM(i+16, 4) || ENV_XMM(i+16, 5) ||
            ENV_XMM(i+16, 6) || ENV_XMM(i+16, 7)) {
            return true;
        }
#endif
    }

    return false;
}

static const VMStateDescription vmstate_avx512 = {
    .name = "cpu/avx512",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = avx512_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS),
        VMSTATE_ZMMH_REGS_VARS(env.xmm_regs, X86CPU, 0),
#ifdef TARGET_X86_64
        VMSTATE_Hi16_ZMM_REGS_VARS(env.xmm_regs, X86CPU, 16),
#endif
        VMSTATE_END_OF_LIST()
    }
};
 838
/* Subsection test: the IA32_XSS MSR has been written. */
static bool xss_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->xss != 0;
}

static const VMStateDescription vmstate_xss = {
    .name = "cpu/xss",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xss_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.xss, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
 857
#ifdef TARGET_X86_64
/* Subsection test: the protection-key rights register is nonzero. */
static bool pkru_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->pkru != 0;
}

static const VMStateDescription vmstate_pkru = {
    .name = "cpu/pkru",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pkru_needed,
    .fields = (VMStateField[]){
        VMSTATE_UINT32(env.pkru, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
#endif
 878
/* Subsection test: a TSC frequency is known *and* the current PC machine
 * type opts in to saving it (pcmc->save_tsc_khz).  NOTE(review): this
 * assumes the machine is a PC variant -- PC_MACHINE_CLASS on another
 * machine type would be invalid; confirm against the board registry. */
static bool tsc_khz_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
    PCMachineClass *pcmc = PC_MACHINE_CLASS(mc);
    return env->tsc_khz && pcmc->save_tsc_khz;
}

static const VMStateDescription vmstate_tsc_khz = {
    .name = "cpu/tsc_khz",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_khz_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(env.tsc_khz, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

/* Subsection test: LMCE is enabled and MCG_EXT_CTL has been written. */
static bool mcg_ext_ctl_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    return cpu->enable_lmce && env->mcg_ext_ctl;
}

static const VMStateDescription vmstate_mcg_ext_ctl = {
    .name = "cpu/mcg_ext_ctl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mcg_ext_ctl_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.mcg_ext_ctl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
 916
/*
 * Top-level migration description for an x86 CPU.  Mandatory fields are
 * versioned (current stream version 12, streams back to version 3 are
 * accepted); optional state travels in the .subsections list, each gated
 * by its own .needed predicate.
 */
VMStateDescription vmstate_x86_cpu = {
    .name = "cpu",
    .version_id = 12,
    .minimum_version_id = 3,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.regs, X86CPU, CPU_NB_REGS),
        VMSTATE_UINTTL(env.eip, X86CPU),
        VMSTATE_UINTTL(env.eflags, X86CPU),
        VMSTATE_UINT32(env.hflags, X86CPU),
        /* FPU */
        VMSTATE_UINT16(env.fpuc, X86CPU),
        /* Shadow fields packed/unpacked by cpu_pre_save/cpu_post_load. */
        VMSTATE_UINT16(env.fpus_vmstate, X86CPU),
        VMSTATE_UINT16(env.fptag_vmstate, X86CPU),
        VMSTATE_UINT16(env.fpregs_format_vmstate, X86CPU),
        VMSTATE_FP_REGS(env.fpregs, X86CPU, 8),

        VMSTATE_SEGMENT_ARRAY(env.segs, X86CPU, 6),
        VMSTATE_SEGMENT(env.ldt, X86CPU),
        VMSTATE_SEGMENT(env.tr, X86CPU),
        VMSTATE_SEGMENT(env.gdt, X86CPU),
        VMSTATE_SEGMENT(env.idt, X86CPU),

        VMSTATE_UINT32(env.sysenter_cs, X86CPU),
#ifdef TARGET_X86_64
        /* Hack: In v7 size changed from 32 to 64 bits on x86_64 */
        VMSTATE_HACK_UINT32(env.sysenter_esp, X86CPU, less_than_7),
        VMSTATE_HACK_UINT32(env.sysenter_eip, X86CPU, less_than_7),
        VMSTATE_UINTTL_V(env.sysenter_esp, X86CPU, 7),
        VMSTATE_UINTTL_V(env.sysenter_eip, X86CPU, 7),
#else
        VMSTATE_UINTTL(env.sysenter_esp, X86CPU),
        VMSTATE_UINTTL(env.sysenter_eip, X86CPU),
#endif

        /* CR1 does not exist; only CR0/2/3/4 are saved. */
        VMSTATE_UINTTL(env.cr[0], X86CPU),
        VMSTATE_UINTTL(env.cr[2], X86CPU),
        VMSTATE_UINTTL(env.cr[3], X86CPU),
        VMSTATE_UINTTL(env.cr[4], X86CPU),
        VMSTATE_UINTTL_ARRAY(env.dr, X86CPU, 8),
        /* MMU */
        VMSTATE_INT32(env.a20_mask, X86CPU),
        /* XMM */
        VMSTATE_UINT32(env.mxcsr, X86CPU),
        VMSTATE_XMM_REGS(env.xmm_regs, X86CPU, 0),

#ifdef TARGET_X86_64
        VMSTATE_UINT64(env.efer, X86CPU),
        VMSTATE_UINT64(env.star, X86CPU),
        VMSTATE_UINT64(env.lstar, X86CPU),
        VMSTATE_UINT64(env.cstar, X86CPU),
        VMSTATE_UINT64(env.fmask, X86CPU),
        VMSTATE_UINT64(env.kernelgsbase, X86CPU),
#endif
        VMSTATE_UINT32_V(env.smbase, X86CPU, 4),

        VMSTATE_UINT64_V(env.pat, X86CPU, 5),
        VMSTATE_UINT32_V(env.hflags2, X86CPU, 5),

        VMSTATE_UINT32_TEST(parent_obj.halted, X86CPU, version_is_5),
        /* SVM state, from stream version 5. */
        VMSTATE_UINT64_V(env.vm_hsave, X86CPU, 5),
        VMSTATE_UINT64_V(env.vm_vmcb, X86CPU, 5),
        VMSTATE_UINT64_V(env.tsc_offset, X86CPU, 5),
        VMSTATE_UINT64_V(env.intercept, X86CPU, 5),
        VMSTATE_UINT16_V(env.intercept_cr_read, X86CPU, 5),
        VMSTATE_UINT16_V(env.intercept_cr_write, X86CPU, 5),
        VMSTATE_UINT16_V(env.intercept_dr_read, X86CPU, 5),
        VMSTATE_UINT16_V(env.intercept_dr_write, X86CPU, 5),
        VMSTATE_UINT32_V(env.intercept_exceptions, X86CPU, 5),
        VMSTATE_UINT8_V(env.v_tpr, X86CPU, 5),
        /* MTRRs */
        VMSTATE_UINT64_ARRAY_V(env.mtrr_fixed, X86CPU, 11, 8),
        VMSTATE_UINT64_V(env.mtrr_deftype, X86CPU, 8),
        VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8),
        /* KVM-related states */
        VMSTATE_INT32_V(env.interrupt_injected, X86CPU, 9),
        VMSTATE_UINT32_V(env.mp_state, X86CPU, 9),
        VMSTATE_UINT64_V(env.tsc, X86CPU, 9),
        VMSTATE_INT32_V(env.exception_injected, X86CPU, 11),
        VMSTATE_UINT8_V(env.soft_interrupt, X86CPU, 11),
        VMSTATE_UINT8_V(env.nmi_injected, X86CPU, 11),
        VMSTATE_UINT8_V(env.nmi_pending, X86CPU, 11),
        VMSTATE_UINT8_V(env.has_error_code, X86CPU, 11),
        VMSTATE_UINT32_V(env.sipi_vector, X86CPU, 11),
        /* MCE */
        VMSTATE_UINT64_V(env.mcg_cap, X86CPU, 10),
        VMSTATE_UINT64_V(env.mcg_status, X86CPU, 10),
        VMSTATE_UINT64_V(env.mcg_ctl, X86CPU, 10),
        VMSTATE_UINT64_ARRAY_V(env.mce_banks, X86CPU, MCE_BANKS_DEF * 4, 10),
        /* rdtscp */
        VMSTATE_UINT64_V(env.tsc_aux, X86CPU, 11),
        /* KVM pvclock msr */
        VMSTATE_UINT64_V(env.system_time_msr, X86CPU, 11),
        VMSTATE_UINT64_V(env.wall_clock_msr, X86CPU, 11),
        /* XSAVE related fields */
        VMSTATE_UINT64_V(env.xcr0, X86CPU, 12),
        VMSTATE_UINT64_V(env.xstate_bv, X86CPU, 12),
        VMSTATE_YMMH_REGS_VARS(env.xmm_regs, X86CPU, 0, 12),
        VMSTATE_END_OF_LIST()
        /* The above list is not sorted wrt version numbers, watch out! */
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_async_pf_msr,
        &vmstate_pv_eoi_msr,
        &vmstate_steal_time_msr,
        &vmstate_fpop_ip_dp,
        &vmstate_msr_tsc_adjust,
        &vmstate_msr_tscdeadline,
        &vmstate_msr_ia32_misc_enable,
        &vmstate_msr_ia32_feature_control,
        &vmstate_msr_architectural_pmu,
        &vmstate_mpx,
        &vmstate_msr_hypercall_hypercall,
        &vmstate_msr_hyperv_vapic,
        &vmstate_msr_hyperv_time,
        &vmstate_msr_hyperv_crash,
        &vmstate_msr_hyperv_runtime,
        &vmstate_msr_hyperv_synic,
        &vmstate_msr_hyperv_stimer,
        &vmstate_avx512,
        &vmstate_xss,
        &vmstate_tsc_khz,
#ifdef TARGET_X86_64
        &vmstate_pkru,
#endif
        &vmstate_mcg_ext_ctl,
        NULL
    }
};
1047