/*
 * PowerPC CPU migration (save/restore) state — qemu/target/ppc/machine.c
 */
   1#include "qemu/osdep.h"
   2#include "cpu.h"
   3#include "system/kvm.h"
   4#include "system/tcg.h"
   5#include "helper_regs.h"
   6#include "mmu-hash64.h"
   7#include "migration/cpu.h"
   8#include "qapi/error.h"
   9#include "kvm_ppc.h"
  10#include "power8-pmu.h"
  11#include "system/replay.h"
  12
/*
 * Re-apply the migrated MSR so that all per-bit side effects (and hflags)
 * are recomputed.  ppc_store_msr() only reacts to bits that *change*, so
 * we first flip every supported bit to force a full update.
 */
static void post_load_update_msr(CPUPPCState *env)
{
    target_ulong msr = env->msr;

    /*
     * Invalidate all supported msr bits except MSR_TGPR/MSR_HVB
     * before restoring.  Note that this recomputes hflags.
     */
    env->msr ^= env->msr_mask & ~((1ULL << MSR_TGPR) | MSR_HVB);
    ppc_store_msr(env, msr);
}
  24
  25static int get_avr(QEMUFile *f, void *pv, size_t size,
  26                   const VMStateField *field)
  27{
  28    ppc_avr_t *v = pv;
  29
  30    v->u64[0] = qemu_get_be64(f);
  31    v->u64[1] = qemu_get_be64(f);
  32
  33    return 0;
  34}
  35
  36static int put_avr(QEMUFile *f, void *pv, size_t size,
  37                   const VMStateField *field, JSONWriter *vmdesc)
  38{
  39    ppc_avr_t *v = pv;
  40
  41    qemu_put_be64(f, v->u64[0]);
  42    qemu_put_be64(f, v->u64[1]);
  43    return 0;
  44}
  45
/* Custom marshaller for a single Altivec (VMX) register. */
static const VMStateInfo vmstate_info_avr = {
    .name = "avr",
    .get  = get_avr,
    .put  = put_avr,
};

/*
 * The AVRs are stored starting at offset 32 within the unified env->vsr
 * array, hence the SUB_ARRAY start index of 32.
 */
#define VMSTATE_AVR_ARRAY_V(_f, _s, _n, _v)                       \
    VMSTATE_SUB_ARRAY(_f, _s, 32, _n, _v, vmstate_info_avr, ppc_avr_t)

#define VMSTATE_AVR_ARRAY(_f, _s, _n)                             \
    VMSTATE_AVR_ARRAY_V(_f, _s, _n, 0)
  57
  58static int get_fpr(QEMUFile *f, void *pv, size_t size,
  59                   const VMStateField *field)
  60{
  61    ppc_vsr_t *v = pv;
  62
  63    v->VsrD(0) = qemu_get_be64(f);
  64
  65    return 0;
  66}
  67
  68static int put_fpr(QEMUFile *f, void *pv, size_t size,
  69                   const VMStateField *field, JSONWriter *vmdesc)
  70{
  71    ppc_vsr_t *v = pv;
  72
  73    qemu_put_be64(f, v->VsrD(0));
  74    return 0;
  75}
  76
/* Custom marshaller for a single FP register (upper half of a VSR). */
static const VMStateInfo vmstate_info_fpr = {
    .name = "fpr",
    .get  = get_fpr,
    .put  = put_fpr,
};

/* The FPRs occupy the first 32 entries of env->vsr (SUB_ARRAY start 0). */
#define VMSTATE_FPR_ARRAY_V(_f, _s, _n, _v)                       \
    VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_fpr, ppc_vsr_t)

#define VMSTATE_FPR_ARRAY(_f, _s, _n)                             \
    VMSTATE_FPR_ARRAY_V(_f, _s, _n, 0)
  88
  89static int get_vsr(QEMUFile *f, void *pv, size_t size,
  90                   const VMStateField *field)
  91{
  92    ppc_vsr_t *v = pv;
  93
  94    v->VsrD(1) = qemu_get_be64(f);
  95
  96    return 0;
  97}
  98
  99static int put_vsr(QEMUFile *f, void *pv, size_t size,
 100                   const VMStateField *field, JSONWriter *vmdesc)
 101{
 102    ppc_vsr_t *v = pv;
 103
 104    qemu_put_be64(f, v->VsrD(1));
 105    return 0;
 106}
 107
/* Custom marshaller for the VSX-only (lower) half of a VSR. */
static const VMStateInfo vmstate_info_vsr = {
    .name = "vsr",
    .get  = get_vsr,
    .put  = put_vsr,
};

/* The VSX lower halves also start at entry 0 of env->vsr. */
#define VMSTATE_VSR_ARRAY_V(_f, _s, _n, _v)                       \
    VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_vsr, ppc_vsr_t)

#define VMSTATE_VSR_ARRAY(_f, _s, _n)                             \
    VMSTATE_VSR_ARRAY_V(_f, _s, _n, 0)
 119
/*
 * Pre-save hook: mirror architectural state that QEMU keeps in dedicated
 * env fields back into env->spr[], since the spr array is what the
 * migration stream actually carries.
 */
static int cpu_pre_save(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;

    env->spr[SPR_LR] = env->lr;
    env->spr[SPR_CTR] = env->ctr;
    env->spr[SPR_XER] = cpu_read_xer(env);
#if defined(TARGET_PPC64)
    env->spr[SPR_CFAR] = env->cfar;
#endif
    env->spr[SPR_BOOKE_SPEFSCR] = env->spe_fscr;

    /* Each BAT is an (upper, lower) SPR pair; first bank covers BATs 0-3. */
    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->spr[SPR_DBAT0U + 2 * i] = env->DBAT[0][i];
        env->spr[SPR_DBAT0U + 2 * i + 1] = env->DBAT[1][i];
        env->spr[SPR_IBAT0U + 2 * i] = env->IBAT[0][i];
        env->spr[SPR_IBAT0U + 2 * i + 1] = env->IBAT[1][i];
    }
    /* Second bank (BATs 4-7) for models with more than four BATs. */
    for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) {
        env->spr[SPR_DBAT4U + 2 * i] = env->DBAT[0][i + 4];
        env->spr[SPR_DBAT4U + 2 * i + 1] = env->DBAT[1][i + 4];
        env->spr[SPR_IBAT4U + 2 * i] = env->IBAT[0][i + 4];
        env->spr[SPR_IBAT4U + 2 * i + 1] = env->IBAT[1][i + 4];
    }

    /* Used to retain migration compatibility for pre 6.0 for 601 machines. */
    env->hflags_compat_nmsr = 0;

    if (tcg_enabled()) {
        /*
         * TCG does not maintain the DECR spr (unlike KVM) so have to save
         * it here.
         */
        env->spr[SPR_DECR] = cpu_ppc_load_decr(env);
    }

    return 0;
}
 160
 161/*
 162 * Determine if a given PVR is a "close enough" match to the CPU
 163 * object.  For TCG and KVM PR it would probably be sufficient to
 164 * require an exact PVR match.  However for KVM HV the user is
 165 * restricted to a PVR exactly matching the host CPU.  The correct way
 166 * to handle this is to put the guest into an architected
 167 * compatibility mode.  However, to allow a more forgiving transition
 168 * and migration from before this was widely done, we allow migration
 169 * between sufficiently similar PVRs, as determined by the CPU class's
 170 * pvr_match() hook.
 171 */
 172static bool pvr_match(PowerPCCPU *cpu, uint32_t pvr)
 173{
 174    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
 175
 176    if (pvr == pcc->pvr) {
 177        return true;
 178    }
 179    return pcc->pvr_match(pcc, pvr, true);
 180}
 181
/*
 * Post-load hook: validate that the incoming CPU model is acceptable,
 * then rebuild the env fields that are derived from the freshly loaded
 * spr[] array and restart TCG-side machinery (decrementer, watchpoints).
 */
static int cpu_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;

    /*
     * If we're operating in compat mode, we should be ok as long as
     * the destination supports the same compatibility mode.
     *
     * Otherwise, however, we require that the destination has exactly
     * the same CPU model as the source.
     */

#if defined(TARGET_PPC64)
    if (cpu->compat_pvr) {
        uint32_t compat_pvr = cpu->compat_pvr;
        Error *local_err = NULL;
        int ret;

        /* Clear the stale value so ppc_set_compat() re-applies it cleanly */
        cpu->compat_pvr = 0;
        ret = ppc_set_compat(cpu, compat_pvr, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    } else
#endif
    {
        if (!pvr_match(cpu, env->spr[SPR_PVR])) {
            return -EINVAL;
        }
    }

    /*
     * If we're running with KVM HV, there is a chance that the guest
     * is running with KVM HV and its kernel does not have the
     * capability of dealing with a different PVR other than this
     * exact host PVR in KVM_SET_SREGS. If that happens, the
     * guest freezes after migration.
     *
     * The function kvmppc_pvr_workaround_required does this verification
     * by first checking if the kernel has the cap, returning true immediately
     * if that is the case. Otherwise, it checks if we're running in KVM PR.
     * If the guest kernel does not have the cap and we're not running KVM-PR
     * (so, it is running KVM-HV), we need to ensure that KVM_SET_SREGS will
     * receive the PVR it expects as a workaround.
     */
    if (kvmppc_pvr_workaround_required(cpu)) {
        env->spr[SPR_PVR] = env->spr_cb[SPR_PVR].default_value;
    }

    /* Mirror the migrated spr[] values back into the dedicated env fields */
    env->lr = env->spr[SPR_LR];
    env->ctr = env->spr[SPR_CTR];
    cpu_write_xer(env, env->spr[SPR_XER]);
#if defined(TARGET_PPC64)
    env->cfar = env->spr[SPR_CFAR];
#endif
    env->spe_fscr = env->spr[SPR_BOOKE_SPEFSCR];

    /* Inverse of the BAT mirroring done in cpu_pre_save() */
    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->DBAT[0][i] = env->spr[SPR_DBAT0U + 2 * i];
        env->DBAT[1][i] = env->spr[SPR_DBAT0U + 2 * i + 1];
        env->IBAT[0][i] = env->spr[SPR_IBAT0U + 2 * i];
        env->IBAT[1][i] = env->spr[SPR_IBAT0U + 2 * i + 1];
    }
    for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) {
        env->DBAT[0][i + 4] = env->spr[SPR_DBAT4U + 2 * i];
        env->DBAT[1][i + 4] = env->spr[SPR_DBAT4U + 2 * i + 1];
        env->IBAT[0][i + 4] = env->spr[SPR_IBAT4U + 2 * i];
        env->IBAT[1][i + 4] = env->spr[SPR_IBAT4U + 2 * i + 1];
    }

    /* With a virtual hypervisor the hash table is managed by the vhyp */
    if (!cpu->vhyp) {
        ppc_store_sdr1(env, env->spr[SPR_SDR1]);
    }

    post_load_update_msr(env);

    if (tcg_enabled()) {
        /* Re-set breaks based on regs */
#if defined(TARGET_PPC64)
        ppc_update_ciabr(env);
        ppc_update_daw(env, 0);
        ppc_update_daw(env, 1);
#endif
        /*
         * TCG needs to re-start the decrementer timer and/or raise the
         * interrupt. This works for level-triggered decrementer. Edge
         * triggered types (including HDEC) would need to carry more state.
         */
        cpu_ppc_store_decr(env, env->spr[SPR_DECR]);
        pmu_mmcr01a_updated(env);
    }

    return 0;
}
 280
 281static bool fpu_needed(void *opaque)
 282{
 283    PowerPCCPU *cpu = opaque;
 284
 285    return cpu->env.insns_flags & PPC_FLOAT;
 286}
 287
/* FP state: the 32 FPRs (upper doubleword of each VSR) plus FPSCR. */
static const VMStateDescription vmstate_fpu = {
    .name = "cpu/fpu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpu_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_FPR_ARRAY(env.vsr, PowerPCCPU, 32),
        VMSTATE_UINTTL(env.fpscr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};
 299
 300static bool altivec_needed(void *opaque)
 301{
 302    PowerPCCPU *cpu = opaque;
 303
 304    return cpu->env.insns_flags & PPC_ALTIVEC;
 305}
 306
 307static int get_vscr(QEMUFile *f, void *opaque, size_t size,
 308                    const VMStateField *field)
 309{
 310    PowerPCCPU *cpu = opaque;
 311    ppc_store_vscr(&cpu->env, qemu_get_be32(f));
 312    return 0;
 313}
 314
 315static int put_vscr(QEMUFile *f, void *opaque, size_t size,
 316                    const VMStateField *field, JSONWriter *vmdesc)
 317{
 318    PowerPCCPU *cpu = opaque;
 319    qemu_put_be32(f, ppc_get_vscr(&cpu->env));
 320    return 0;
 321}
 322
/* Marshaller for the architected VSCR (see vmstate_altivec for why). */
static const VMStateInfo vmstate_vscr = {
    .name = "cpu/altivec/vscr",
    .get = get_vscr,
    .put = put_vscr,
};
 328
/* Altivec (VMX) state: the 32 AVRs plus the VSCR. */
static const VMStateDescription vmstate_altivec = {
    .name = "cpu/altivec",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = altivec_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_AVR_ARRAY(env.vsr, PowerPCCPU, 32),
        /*
         * Save the architecture value of the vscr, not the internally
         * expanded version.  Since this architecture value does not
         * exist in memory to be stored, this requires a bit of hoop
         * jumping.  We want OFFSET=0 so that we effectively pass CPU
         * to the helper functions.
         */
        {
            .name = "vscr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_vscr,
            .flags = VMS_SINGLE,
            .offset = 0
        },
        VMSTATE_END_OF_LIST()
    },
};
 354
 355static bool vsx_needed(void *opaque)
 356{
 357    PowerPCCPU *cpu = opaque;
 358
 359    return cpu->env.insns_flags2 & PPC2_VSX;
 360}
 361
/* VSX state: the lower doublewords of VSRs 0-31 (uppers travel as FPRs). */
static const VMStateDescription vmstate_vsx = {
    .name = "cpu/vsx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vsx_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_VSR_ARRAY(env.vsr, PowerPCCPU, 32),
        VMSTATE_END_OF_LIST()
    },
};
 372
 373#ifdef TARGET_PPC64
 374/* Transactional memory state */
 375static bool tm_needed(void *opaque)
 376{
 377    PowerPCCPU *cpu = opaque;
 378    CPUPPCState *env = &cpu->env;
 379    return FIELD_EX64(env->msr, MSR, TS);
 380}
 381
/* Transactional-memory checkpointed register state. */
static const VMStateDescription vmstate_tm = {
    .name = "cpu/tm",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tm_needed,
    .fields = (const VMStateField []) {
        VMSTATE_UINTTL_ARRAY(env.tm_gpr, PowerPCCPU, 32),
        VMSTATE_AVR_ARRAY(env.tm_vsr, PowerPCCPU, 64),
        VMSTATE_UINT64(env.tm_cr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_lr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ctr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_fpscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_amr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ppr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_vrsave, PowerPCCPU),
        VMSTATE_UINT32(env.tm_vscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_dscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_tar, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};
 403#endif
 404
/*
 * Segment registers only exist on 32-bit MMUs; a 64-bit build still
 * needs them when emulating a 32-bit hash MMU model.
 */
static bool sr_needed(void *opaque)
{
#ifdef TARGET_PPC64
    PowerPCCPU *cpu = opaque;

    return !mmu_is_64bit(cpu->env.mmu_model);
#else
    return true;
#endif
}
 415
/* The 16 segment registers (array of 32 for historic reasons in env). */
static const VMStateDescription vmstate_sr = {
    .name = "cpu/sr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sr_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.sr, PowerPCCPU, 32),
        VMSTATE_END_OF_LIST()
    },
};
 426
 427#ifdef TARGET_PPC64
 428static int get_slbe(QEMUFile *f, void *pv, size_t size,
 429                    const VMStateField *field)
 430{
 431    ppc_slb_t *v = pv;
 432
 433    v->esid = qemu_get_be64(f);
 434    v->vsid = qemu_get_be64(f);
 435
 436    return 0;
 437}
 438
 439static int put_slbe(QEMUFile *f, void *pv, size_t size,
 440                    const VMStateField *field, JSONWriter *vmdesc)
 441{
 442    ppc_slb_t *v = pv;
 443
 444    qemu_put_be64(f, v->esid);
 445    qemu_put_be64(f, v->vsid);
 446    return 0;
 447}
 448
/* Custom marshaller for a single SLB entry (raw esid/vsid only). */
static const VMStateInfo vmstate_info_slbe = {
    .name = "slbe",
    .get  = get_slbe,
    .put  = put_slbe,
};

#define VMSTATE_SLB_ARRAY_V(_f, _s, _n, _v)                       \
    VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_slbe, ppc_slb_t)

#define VMSTATE_SLB_ARRAY(_f, _s, _n)                             \
    VMSTATE_SLB_ARRAY_V(_f, _s, _n, 0)
 460
 461static bool slb_needed(void *opaque)
 462{
 463    PowerPCCPU *cpu = opaque;
 464
 465    /* We don't support any of the old segment table based 64-bit CPUs */
 466    return mmu_is_64bit(cpu->env.mmu_model);
 467}
 468
 469static int slb_post_load(void *opaque, int version_id)
 470{
 471    PowerPCCPU *cpu = opaque;
 472    CPUPPCState *env = &cpu->env;
 473    int i;
 474
 475    /*
 476     * We've pulled in the raw esid and vsid values from the migration
 477     * stream, but we need to recompute the page size pointers
 478     */
 479    for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
 480        if (ppc_store_slb(cpu, i, env->slb[i].esid, env->slb[i].vsid) < 0) {
 481            /* Migration source had bad values in its SLB */
 482            return -1;
 483        }
 484    }
 485
 486    return 0;
 487}
 488
/* SLB contents; slb_post_load() rebuilds derived state after loading. */
static const VMStateDescription vmstate_slb = {
    .name = "cpu/slb",
    .version_id = 2,
    .minimum_version_id = 1,
    .needed = slb_needed,
    .post_load = slb_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES),
        VMSTATE_END_OF_LIST()
    }
};
 500#endif /* TARGET_PPC64 */
 501
/* One 6xx-style software TLB entry. */
static const VMStateDescription vmstate_tlb6xx_entry = {
    .name = "cpu/tlb6xx_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINTTL(pte0, ppc6xx_tlb_t),
        VMSTATE_UINTTL(pte1, ppc6xx_tlb_t),
        VMSTATE_UINTTL(EPN, ppc6xx_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};
 513
 514static bool tlb6xx_needed(void *opaque)
 515{
 516    PowerPCCPU *cpu = opaque;
 517    CPUPPCState *env = &cpu->env;
 518
 519    return env->nb_tlb && (env->tlb_type == TLB_6XX);
 520}
 521
/* 6xx software TLB array plus the temporary GPRs used by TLB miss handlers. */
static const VMStateDescription vmstate_tlb6xx = {
    .name = "cpu/tlb6xx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlb6xx_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlb6, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlb6xx_entry,
                                            ppc6xx_tlb_t),
        VMSTATE_UINTTL_ARRAY(env.tgpr, PowerPCCPU, 4),
        VMSTATE_END_OF_LIST()
    }
};
 537
/* One embedded (BookE-style) TLB entry. */
static const VMStateDescription vmstate_tlbemb_entry = {
    .name = "cpu/tlbemb_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(RPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(EPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(PID, ppcemb_tlb_t),
        VMSTATE_UINTTL(size, ppcemb_tlb_t),
        VMSTATE_UINT32(prot, ppcemb_tlb_t),
        VMSTATE_UINT32(attr, ppcemb_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};
 552
 553static bool tlbemb_needed(void *opaque)
 554{
 555    PowerPCCPU *cpu = opaque;
 556    CPUPPCState *env = &cpu->env;
 557
 558    return env->nb_tlb && (env->tlb_type == TLB_EMB);
 559}
 560
/* Embedded (BookE) TLB array. */
static const VMStateDescription vmstate_tlbemb = {
    .name = "cpu/tlbemb",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlbemb_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbe, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlbemb_entry,
                                            ppcemb_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};
 575
/* One MAS-style (BookE 2.06) TLB entry. */
static const VMStateDescription vmstate_tlbmas_entry = {
    .name = "cpu/tlbmas_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(mas8, ppcmas_tlb_t),
        VMSTATE_UINT32(mas1, ppcmas_tlb_t),
        VMSTATE_UINT64(mas2, ppcmas_tlb_t),
        VMSTATE_UINT64(mas7_3, ppcmas_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};
 588
 589static bool tlbmas_needed(void *opaque)
 590{
 591    PowerPCCPU *cpu = opaque;
 592    CPUPPCState *env = &cpu->env;
 593
 594    return env->nb_tlb && (env->tlb_type == TLB_MAS);
 595}
 596
/* MAS-style TLB array. */
static const VMStateDescription vmstate_tlbmas = {
    .name = "cpu/tlbmas",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlbmas_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbm, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlbmas_entry,
                                            ppcmas_tlb_t),
        VMSTATE_END_OF_LIST()
    }
};
 611
 612static bool compat_needed(void *opaque)
 613{
 614    PowerPCCPU *cpu = opaque;
 615
 616    assert(!(cpu->compat_pvr && !cpu->vhyp));
 617    return cpu->compat_pvr != 0;
 618}
 619
/* Architected compatibility mode (compat PVR) in effect on the source. */
static const VMStateDescription vmstate_compat = {
    .name = "cpu/compat",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = compat_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(compat_pvr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    }
};
 630
 631static bool reservation_needed(void *opaque)
 632{
 633    return (replay_mode != REPLAY_MODE_NONE);
 634}
 635
/* lwarx/stwcx. reservation state, needed to replay atomics faithfully. */
static const VMStateDescription vmstate_reservation = {
    .name = "cpu/reservation",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = reservation_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINTTL(env.reserve_addr, PowerPCCPU),
        VMSTATE_UINTTL(env.reserve_length, PowerPCCPU),
        VMSTATE_UINTTL(env.reserve_val, PowerPCCPU),
#if defined(TARGET_PPC64)
        VMSTATE_UINTTL(env.reserve_val2, PowerPCCPU),
#endif
        VMSTATE_END_OF_LIST()
    }
};
 651
 652#ifdef TARGET_PPC64
 653static bool bhrb_needed(void *opaque)
 654{
 655    PowerPCCPU *cpu = opaque;
 656    return (cpu->env.flags & POWERPC_FLAG_BHRB) != 0;
 657}
 658
 659static const VMStateDescription vmstate_bhrb = {
 660    .name = "cpu/bhrb",
 661    .version_id = 1,
 662    .minimum_version_id = 1,
 663    .needed = bhrb_needed,
 664    .fields = (VMStateField[]) {
 665        VMSTATE_UINTTL(env.bhrb_offset, PowerPCCPU),
 666        VMSTATE_UINT64_ARRAY(env.bhrb, PowerPCCPU, BHRB_MAX_NUM_ENTRIES),
 667        VMSTATE_END_OF_LIST()
 668    }
 669};
 670#endif
 671
/*
 * Top-level PowerPC CPU migration description.  The baseline fields
 * carry the always-present architected state; optional facilities
 * (FPU, Altivec, VSX, TM, SLB, TLBs, ...) travel in subsections that
 * are emitted only when their "needed" predicate holds.
 */
const VMStateDescription vmstate_ppc_cpu = {
    .name = "cpu",
    .version_id = 5,
    .minimum_version_id = 5,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UNUSED(sizeof(target_ulong)), /* was _EQUAL(env.spr[SPR_PVR]) */

        /* User mode architected state */
        VMSTATE_UINTTL_ARRAY(env.gpr, PowerPCCPU, 32),
#if !defined(TARGET_PPC64)
        VMSTATE_UINTTL_ARRAY(env.gprh, PowerPCCPU, 32),
#endif
        VMSTATE_UINT32_ARRAY(env.crf, PowerPCCPU, 8),
        VMSTATE_UINTTL(env.nip, PowerPCCPU),

        /* SPRs */
        VMSTATE_UINTTL_ARRAY(env.spr, PowerPCCPU, 1024),
        VMSTATE_UINT64(env.spe_acc, PowerPCCPU),

        VMSTATE_UNUSED(sizeof(target_ulong)), /* was env.reserve_addr */

        /* Supervisor mode architected state */
        VMSTATE_UINTTL(env.msr, PowerPCCPU),

        /* Backward compatible internal state */
        VMSTATE_UINTTL(env.hflags_compat_nmsr, PowerPCCPU),

        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_fpu,
        &vmstate_altivec,
        &vmstate_vsx,
        &vmstate_sr,
#ifdef TARGET_PPC64
        &vmstate_tm,
        &vmstate_slb,
        &vmstate_bhrb,
#endif /* TARGET_PPC64 */
        &vmstate_tlb6xx,
        &vmstate_tlbemb,
        &vmstate_tlbmas,
        &vmstate_compat,
        &vmstate_reservation,
        NULL
    }
};
 721