qemu/target/ppc/machine.c
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "sysemu/kvm.h"
#include "helper_regs.h"
#include "mmu-hash64.h"
#include "migration/cpu.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "kvm_ppc.h"

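/*
 * Force ppc_store_msr() to re-derive everything that depends on MSR
 * (notably hflags): XOR-ing the writable bits first makes every
 * maskable bit appear changed when the saved value is stored back.
 */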
static void post_load_update_msr(CPUPPCState *env)
{
    target_ulong msr = env->msr;

    /*
     * Invalidate all supported msr bits except MSR_TGPR/MSR_HVB
     * before restoring.  Note that this recomputes hflags.
     */
    env->msr ^= env->msr_mask & ~((1ULL << MSR_TGPR) | MSR_HVB);
    ppc_store_msr(env, msr);
}

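/*
 * Legacy hand-rolled loader for migration streams older than version
 * 5; reached via vmstate_ppc_cpu.load_state_old.  The read order
 * below must match the order the old save code used.
 */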
static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    unsigned int i, j;
    target_ulong sdr1;
    uint32_t fpscr, vscr;
#if defined(TARGET_PPC64)
    int32_t slb_nr;
#endif
    target_ulong xer;

    for (i = 0; i < 32; i++) {
        qemu_get_betls(f, &env->gpr[i]);
    }
#if !defined(TARGET_PPC64)
    for (i = 0; i < 32; i++) {
        qemu_get_betls(f, &env->gprh[i]);
    }
#endif
    qemu_get_betls(f, &env->lr);
    qemu_get_betls(f, &env->ctr);
    for (i = 0; i < 8; i++) {
        qemu_get_be32s(f, &env->crf[i]);
    }
    qemu_get_betls(f, &xer);
    cpu_write_xer(env, xer);
    qemu_get_betls(f, &env->reserve_addr);
    qemu_get_betls(f, &env->msr);
    for (i = 0; i < 4; i++) {
        qemu_get_betls(f, &env->tgpr[i]);
    }
    for (i = 0; i < 32; i++) {
        union {
            float64 d;
            uint64_t l;
        } u;
        u.l = qemu_get_be64(f);
        *cpu_fpr_ptr(env, i) = u.d;
    }
    qemu_get_be32s(f, &fpscr);
    env->fpscr = fpscr;
    qemu_get_sbe32s(f, &env->access_type);
#if defined(TARGET_PPC64)
    qemu_get_betls(f, &env->spr[SPR_ASR]);
    qemu_get_sbe32s(f, &slb_nr);
#endif
    qemu_get_betls(f, &sdr1);
    for (i = 0; i < 32; i++) {
        qemu_get_betls(f, &env->sr[i]);
    }
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 8; j++) {
            qemu_get_betls(f, &env->DBAT[i][j]);
        }
    }
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 8; j++) {
            qemu_get_betls(f, &env->IBAT[i][j]);
        }
    }
    qemu_get_sbe32s(f, &env->nb_tlb);
    qemu_get_sbe32s(f, &env->tlb_per_way);
    qemu_get_sbe32s(f, &env->nb_ways);
    qemu_get_sbe32s(f, &env->last_way);
    qemu_get_sbe32s(f, &env->id_tlbs);
    qemu_get_sbe32s(f, &env->nb_pids);
    if (env->tlb.tlb6) {
        /* XXX assumes 6xx */
        for (i = 0; i < env->nb_tlb; i++) {
            qemu_get_betls(f, &env->tlb.tlb6[i].pte0);
            qemu_get_betls(f, &env->tlb.tlb6[i].pte1);
            qemu_get_betls(f, &env->tlb.tlb6[i].EPN);
        }
    }
    for (i = 0; i < 4; i++) {
        qemu_get_betls(f, &env->pb[i]);
    }
    for (i = 0; i < 1024; i++) {
        qemu_get_betls(f, &env->spr[i]);
    }
    if (!cpu->vhyp) {
        ppc_store_sdr1(env, sdr1);
    }
    qemu_get_be32s(f, &vscr);
    ppc_store_vscr(env, vscr);
    qemu_get_be64s(f, &env->spe_acc);
    qemu_get_be32s(f, &env->spe_fscr);
    qemu_get_betls(f, &env->msr_mask);
    qemu_get_be32s(f, &env->flags);
    qemu_get_sbe32s(f, &env->error_code);
    qemu_get_be32s(f, &env->pending_interrupts);
    qemu_get_be32s(f, &env->irq_input_state);
    for (i = 0; i < POWERPC_EXCP_NB; i++) {
        qemu_get_betls(f, &env->excp_vectors[i]);
    }
    qemu_get_betls(f, &env->excp_prefix);
    qemu_get_betls(f, &env->ivor_mask);
    qemu_get_betls(f, &env->ivpr_mask);
    qemu_get_betls(f, &env->hreset_vector);
    qemu_get_betls(f, &env->nip);
    qemu_get_sbetl(f); /* Discard unused hflags */
    qemu_get_sbetl(f); /* Discard unused hflags_nmsr */
    qemu_get_sbe32(f); /* Discard unused mmu_idx */
    qemu_get_sbe32(f); /* Discard unused power_mode */

    post_load_update_msr(env);

    return 0;
}

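/*
 * Altivec registers migrate as two big-endian 64-bit halves,
 * independent of host byte order.
 */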
static int get_avr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_avr_t *v = pv;

    v->u64[0] = qemu_get_be64(f);
    v->u64[1] = qemu_get_be64(f);

    return 0;
}

static int put_avr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, JSONWriter *vmdesc)
{
    ppc_avr_t *v = pv;

    qemu_put_be64(f, v->u64[0]);
    qemu_put_be64(f, v->u64[1]);
    return 0;
}

static const VMStateInfo vmstate_info_avr = {
    .name = "avr",
    .get  = get_avr,
    .put  = put_avr,
};

#define VMSTATE_AVR_ARRAY_V(_f, _s, _n, _v)                       \
    VMSTATE_SUB_ARRAY(_f, _s, 32, _n, _v, vmstate_info_avr, ppc_avr_t)

#define VMSTATE_AVR_ARRAY(_f, _s, _n)                             \
    VMSTATE_AVR_ARRAY_V(_f, _s, _n, 0)

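/*
 * FPRs live in doubleword 0 of the unified env->vsr array; only that
 * half is carried in the "cpu/fpu" section.
 */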
static int get_fpr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_vsr_t *v = pv;

    v->VsrD(0) = qemu_get_be64(f);

    return 0;
}

static int put_fpr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, JSONWriter *vmdesc)
{
    ppc_vsr_t *v = pv;

    qemu_put_be64(f, v->VsrD(0));
    return 0;
}

static const VMStateInfo vmstate_info_fpr = {
    .name = "fpr",
    .get  = get_fpr,
    .put  = put_fpr,
};

#define VMSTATE_FPR_ARRAY_V(_f, _s, _n, _v)                       \
    VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_fpr, ppc_vsr_t)

#define VMSTATE_FPR_ARRAY(_f, _s, _n)                             \
    VMSTATE_FPR_ARRAY_V(_f, _s, _n, 0)

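/*
 * The VSX extension supplies the low doubleword (VsrD(1)) of the
 * first 32 VSRs.  Together with "cpu/fpu" (VsrD(0) of VSR 0..31) and
 * "cpu/altivec" (VSR 32..63), this reassembles the full 128-bit
 * register file.
 */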
static int get_vsr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_vsr_t *v = pv;

    v->VsrD(1) = qemu_get_be64(f);

    return 0;
}

static int put_vsr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, JSONWriter *vmdesc)
{
    ppc_vsr_t *v = pv;

    qemu_put_be64(f, v->VsrD(1));
    return 0;
}

static const VMStateInfo vmstate_info_vsr = {
    .name = "vsr",
    .get  = get_vsr,
    .put  = put_vsr,
};

#define VMSTATE_VSR_ARRAY_V(_f, _s, _n, _v)                       \
    VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_vsr, ppc_vsr_t)

#define VMSTATE_VSR_ARRAY(_f, _s, _n)                             \
    VMSTATE_VSR_ARRAY_V(_f, _s, _n, 0)

static bool cpu_pre_2_8_migration(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;

    return cpu->pre_2_8_migration;
}

#if defined(TARGET_PPC64)
static bool cpu_pre_3_0_migration(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;

    return cpu->pre_3_0_migration;
}
#endif

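/*
 * Mirror the dedicated fields (lr, ctr, xer, cfar, SPE fscr, BATs)
 * into the env->spr[] image, since the migration stream carries the
 * whole SPR array rather than those fields individually.
 */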
static int cpu_pre_save(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t insns_compat_mask =
        PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB
        | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES
        | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES
        | PPC_FLOAT_STFIWX | PPC_FLOAT_EXT
        | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ
        | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC
        | PPC_64B | PPC_64BX | PPC_ALTIVEC
        | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD;
    uint64_t insns_compat_mask2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX
        | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206
        | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206
        | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207
        | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207
        | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM;

    env->spr[SPR_LR] = env->lr;
    env->spr[SPR_CTR] = env->ctr;
    env->spr[SPR_XER] = cpu_read_xer(env);
#if defined(TARGET_PPC64)
    env->spr[SPR_CFAR] = env->cfar;
#endif
    env->spr[SPR_BOOKE_SPEFSCR] = env->spe_fscr;

    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->spr[SPR_DBAT0U + 2 * i] = env->DBAT[0][i];
        env->spr[SPR_DBAT0U + 2 * i + 1] = env->DBAT[1][i];
        env->spr[SPR_IBAT0U + 2 * i] = env->IBAT[0][i];
        env->spr[SPR_IBAT0U + 2 * i + 1] = env->IBAT[1][i];
    }
    for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) {
        env->spr[SPR_DBAT4U + 2 * i] = env->DBAT[0][i + 4];
        env->spr[SPR_DBAT4U + 2 * i + 1] = env->DBAT[1][i + 4];
        env->spr[SPR_IBAT4U + 2 * i] = env->IBAT[0][i + 4];
        env->spr[SPR_IBAT4U + 2 * i + 1] = env->IBAT[1][i + 4];
    }

    /* Hacks for migration compatibility between 2.6, 2.7 & 2.8 */
    if (cpu->pre_2_8_migration) {
        /*
         * Mask out bits that got added to msr_mask since the versions
         * which stupidly included it in the migration stream.
         */
        target_ulong metamask = 0
#if defined(TARGET_PPC64)
            | (1ULL << MSR_TS0)
            | (1ULL << MSR_TS1)
#endif
            ;
        cpu->mig_msr_mask = env->msr_mask & ~metamask;
        cpu->mig_insns_flags = env->insns_flags & insns_compat_mask;
        /*
         * CPU models supported by old machines all have
         * PPC_MEM_TLBIE, so we set it unconditionally to allow
         * backward migration from a POWER9 host to a POWER8 host.
         */
        cpu->mig_insns_flags |= PPC_MEM_TLBIE;
        cpu->mig_insns_flags2 = env->insns_flags2 & insns_compat_mask2;
        cpu->mig_nb_BATs = env->nb_BATs;
    }
    if (cpu->pre_3_0_migration) {
        if (cpu->hash64_opts) {
            cpu->mig_slb_nr = cpu->hash64_opts->slb_size;
        }
    }

    /* Retain migration compatibility with pre-6.0 QEMU for 601 machines. */
    env->hflags_compat_nmsr = (env->flags & POWERPC_FLAG_HID0_LE
                               ? env->hflags & MSR_LE : 0);

    return 0;
}

/*
 * Determine if a given PVR is a "close enough" match to the CPU
 * object.  For TCG and KVM PR it would probably be sufficient to
 * require an exact PVR match.  However for KVM HV the user is
 * restricted to a PVR exactly matching the host CPU.  The correct way
 * to handle this is to put the guest into an architected
 * compatibility mode.  However, to allow a more forgiving transition
 * and migration from before this was widely done, we allow migration
 * between sufficiently similar PVRs, as determined by the CPU class's
 * pvr_match() hook.
 */
static bool pvr_match(PowerPCCPU *cpu, uint32_t pvr)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (pvr == pcc->pvr) {
        return true;
    }
    return pcc->pvr_match(pcc, pvr);
}

static int cpu_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;

    /*
     * If we're operating in compat mode, we should be ok as long as
     * the destination supports the same compatibility mode.
     *
     * Otherwise, however, we require that the destination has exactly
     * the same CPU model as the source.
     */

#if defined(TARGET_PPC64)
    if (cpu->compat_pvr) {
        uint32_t compat_pvr = cpu->compat_pvr;
        Error *local_err = NULL;
        int ret;

        cpu->compat_pvr = 0;
        ret = ppc_set_compat(cpu, compat_pvr, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    } else
#endif
    {
        if (!pvr_match(cpu, env->spr[SPR_PVR])) {
            return -EINVAL;
        }
    }

    /*
     * If we're running with KVM HV, there is a chance that the
     * destination host kernel cannot handle a PVR other than the
     * exact host PVR in KVM_SET_SREGS.  If that happens, the guest
     * freezes after migration.
     *
     * kvmppc_pvr_workaround_required() performs this check.  The
     * workaround is unnecessary if the kernel has the PVR-compat
     * capability, or if we're running under KVM PR.  If the kernel
     * lacks the capability and we're not on KVM PR (so we are on
     * KVM HV), we must ensure that KVM_SET_SREGS receives the PVR
     * it expects.
     */
    if (kvmppc_pvr_workaround_required(cpu)) {
        env->spr[SPR_PVR] = env->spr_cb[SPR_PVR].default_value;
    }

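    /*
     * Undo cpu_pre_save()'s mirroring: pull the architected fields
     * back out of the migrated env->spr[] image.
     */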
    env->lr = env->spr[SPR_LR];
    env->ctr = env->spr[SPR_CTR];
    cpu_write_xer(env, env->spr[SPR_XER]);
#if defined(TARGET_PPC64)
    env->cfar = env->spr[SPR_CFAR];
#endif
    env->spe_fscr = env->spr[SPR_BOOKE_SPEFSCR];

    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->DBAT[0][i] = env->spr[SPR_DBAT0U + 2 * i];
        env->DBAT[1][i] = env->spr[SPR_DBAT0U + 2 * i + 1];
        env->IBAT[0][i] = env->spr[SPR_IBAT0U + 2 * i];
        env->IBAT[1][i] = env->spr[SPR_IBAT0U + 2 * i + 1];
    }
    for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) {
        env->DBAT[0][i + 4] = env->spr[SPR_DBAT4U + 2 * i];
        env->DBAT[1][i + 4] = env->spr[SPR_DBAT4U + 2 * i + 1];
        env->IBAT[0][i + 4] = env->spr[SPR_IBAT4U + 2 * i];
        env->IBAT[1][i + 4] = env->spr[SPR_IBAT4U + 2 * i + 1];
    }

    if (!cpu->vhyp) {
        ppc_store_sdr1(env, env->spr[SPR_SDR1]);
    }

    post_load_update_msr(env);

    return 0;
}

static bool fpu_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return cpu->env.insns_flags & PPC_FLOAT;
}

static const VMStateDescription vmstate_fpu = {
    .name = "cpu/fpu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpu_needed,
    .fields = (VMStateField[]) {
        VMSTATE_FPR_ARRAY(env.vsr, PowerPCCPU, 32),
        VMSTATE_UINTTL(env.fpscr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};

static bool altivec_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return cpu->env.insns_flags & PPC_ALTIVEC;
}

static int get_vscr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    PowerPCCPU *cpu = opaque;
    ppc_store_vscr(&cpu->env, qemu_get_be32(f));
    return 0;
}

static int put_vscr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field, JSONWriter *vmdesc)
{
    PowerPCCPU *cpu = opaque;
    qemu_put_be32(f, ppc_get_vscr(&cpu->env));
    return 0;
}

static const VMStateInfo vmstate_vscr = {
    .name = "cpu/altivec/vscr",
    .get = get_vscr,
    .put = put_vscr,
};

static const VMStateDescription vmstate_altivec = {
    .name = "cpu/altivec",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = altivec_needed,
    .fields = (VMStateField[]) {
        VMSTATE_AVR_ARRAY(env.vsr, PowerPCCPU, 32),
        /*
         * Save the architecture value of the vscr, not the internally
         * expanded version.  Since this architecture value does not
         * exist in memory to be stored, this requires a bit of hoop
         * jumping.  We want OFFSET=0 so that we effectively pass CPU
         * to the helper functions.
         */
        {
            .name = "vscr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_vscr,
            .flags = VMS_SINGLE,
            .offset = 0
        },
        VMSTATE_END_OF_LIST()
    },
};

static bool vsx_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return cpu->env.insns_flags2 & PPC2_VSX;
}

static const VMStateDescription vmstate_vsx = {
    .name = "cpu/vsx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vsx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_VSR_ARRAY(env.vsr, PowerPCCPU, 32),
        VMSTATE_END_OF_LIST()
    },
};

#ifdef TARGET_PPC64
/* Transactional memory state */
static bool tm_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
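    /* MSR[TS] is non-zero while a transaction is active or suspended */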
    return msr_ts;
}

static const VMStateDescription vmstate_tm = {
    .name = "cpu/tm",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .needed = tm_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.tm_gpr, PowerPCCPU, 32),
        VMSTATE_AVR_ARRAY(env.tm_vsr, PowerPCCPU, 64),
        VMSTATE_UINT64(env.tm_cr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_lr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ctr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_fpscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_amr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ppr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_vrsave, PowerPCCPU),
        VMSTATE_UINT32(env.tm_vscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_dscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_tar, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};
#endif

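/*
 * Segment registers only exist on CPUs without the 64-bit hash MMU;
 * on 64-bit parts the SLB (below) takes their place.
 */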
static bool sr_needed(void *opaque)
{
#ifdef TARGET_PPC64
    PowerPCCPU *cpu = opaque;

    return !mmu_is_64bit(cpu->env.mmu_model);
#else
    return true;
#endif
}

static const VMStateDescription vmstate_sr = {
    .name = "cpu/sr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.sr, PowerPCCPU, 32),
        VMSTATE_END_OF_LIST()
    },
};

#ifdef TARGET_PPC64
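/*
 * SLB entries migrate as raw esid/vsid doublewords; slb_post_load()
 * rebuilds the derived page-size information via ppc_store_slb().
 */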
static int get_slbe(QEMUFile *f, void *pv, size_t size,
                    const VMStateField *field)
{
    ppc_slb_t *v = pv;

    v->esid = qemu_get_be64(f);
    v->vsid = qemu_get_be64(f);

    return 0;
}

static int put_slbe(QEMUFile *f, void *pv, size_t size,
                    const VMStateField *field, JSONWriter *vmdesc)
{
    ppc_slb_t *v = pv;

    qemu_put_be64(f, v->esid);
    qemu_put_be64(f, v->vsid);
    return 0;
}

static const VMStateInfo vmstate_info_slbe = {
    .name = "slbe",
    .get  = get_slbe,
    .put  = put_slbe,
};

#define VMSTATE_SLB_ARRAY_V(_f, _s, _n, _v)                       \
    VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_slbe, ppc_slb_t)

#define VMSTATE_SLB_ARRAY(_f, _s, _n)                             \
    VMSTATE_SLB_ARRAY_V(_f, _s, _n, 0)

static bool slb_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    /* We don't support any of the old segment table based 64-bit CPUs */
    return mmu_is_64bit(cpu->env.mmu_model);
}

static int slb_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;

    /*
     * We've pulled in the raw esid and vsid values from the migration
     * stream, but we need to recompute the page size pointers.
     */
    for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
        if (ppc_store_slb(cpu, i, env->slb[i].esid, env->slb[i].vsid) < 0) {
            /* Migration source had bad values in its SLB */
            return -1;
        }
    }

    return 0;
}

static const VMStateDescription vmstate_slb = {
    .name = "cpu/slb",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = slb_needed,
    .post_load = slb_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_TEST(mig_slb_nr, PowerPCCPU, cpu_pre_3_0_migration),
        VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES),
        VMSTATE_END_OF_LIST()
    }
};
#endif /* TARGET_PPC64 */

static const VMStateDescription vmstate_tlb6xx_entry = {
    .name = "cpu/tlb6xx_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(pte0, ppc6xx_tlb_t),
        VMSTATE_UINTTL(pte1, ppc6xx_tlb_t),
        VMSTATE_UINTTL(EPN, ppc6xx_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlb6xx_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_6XX);
}

static const VMStateDescription vmstate_tlb6xx = {
    .name = "cpu/tlb6xx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlb6xx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlb6, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlb6xx_entry,
                                            ppc6xx_tlb_t),
        VMSTATE_UINTTL_ARRAY(env.tgpr, PowerPCCPU, 4),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_tlbemb_entry = {
    .name = "cpu/tlbemb_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(RPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(EPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(PID, ppcemb_tlb_t),
        VMSTATE_UINTTL(size, ppcemb_tlb_t),
        VMSTATE_UINT32(prot, ppcemb_tlb_t),
        VMSTATE_UINT32(attr, ppcemb_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlbemb_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_EMB);
}

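/*
 * A PVR with 0x0020 in the top halfword identifies the 403 family,
 * which has the pb[] protection-bound registers.
 */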
static bool pbr403_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    uint32_t pvr = cpu->env.spr[SPR_PVR];

    return (pvr & 0xffff0000) == 0x00200000;
}

static const VMStateDescription vmstate_pbr403 = {
    .name = "cpu/pbr403",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pbr403_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.pb, PowerPCCPU, 4),
        VMSTATE_END_OF_LIST()
    },
};

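/*
 * Note: this section reuses the "cpu/tlb6xx" name, apparently a
 * copy-paste carryover; changing the string now would presumably
 * break compatibility with existing migration streams.
 */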
static const VMStateDescription vmstate_tlbemb = {
    .name = "cpu/tlb6xx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlbemb_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbe, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlbemb_entry,
                                            ppcemb_tlb_t),
        /* 403 protection registers */
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_pbr403,
        NULL
    }
};

static const VMStateDescription vmstate_tlbmas_entry = {
    .name = "cpu/tlbmas_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(mas8, ppcmas_tlb_t),
        VMSTATE_UINT32(mas1, ppcmas_tlb_t),
        VMSTATE_UINT64(mas2, ppcmas_tlb_t),
        VMSTATE_UINT64(mas7_3, ppcmas_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlbmas_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_MAS);
}

static const VMStateDescription vmstate_tlbmas = {
    .name = "cpu/tlbmas",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlbmas_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbm, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlbmas_entry,
                                            ppcmas_tlb_t),
        VMSTATE_END_OF_LIST()
    }
};

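/*
 * A non-zero compat_pvr implies a virtual hypervisor (hence the
 * assert below); pre-2.10 streams presumably predate the cpu/compat
 * subsection, so it is suppressed for them.
 */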
static bool compat_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    assert(!(cpu->compat_pvr && !cpu->vhyp));
    return !cpu->pre_2_10_migration && cpu->compat_pvr != 0;
}

static const VMStateDescription vmstate_compat = {
    .name = "cpu/compat",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = compat_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(compat_pvr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    }
};

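/*
 * Top-level CPU state.  Version-5 streams are handled by the field
 * list below; version-4 streams fall back to cpu_load_old().
 */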
const VMStateDescription vmstate_ppc_cpu = {
    .name = "cpu",
    .version_id = 5,
    .minimum_version_id = 5,
    .minimum_version_id_old = 4,
    .load_state_old = cpu_load_old,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED(sizeof(target_ulong)), /* was _EQUAL(env.spr[SPR_PVR]) */

        /* User mode architected state */
        VMSTATE_UINTTL_ARRAY(env.gpr, PowerPCCPU, 32),
#if !defined(TARGET_PPC64)
        VMSTATE_UINTTL_ARRAY(env.gprh, PowerPCCPU, 32),
#endif
        VMSTATE_UINT32_ARRAY(env.crf, PowerPCCPU, 8),
        VMSTATE_UINTTL(env.nip, PowerPCCPU),

        /* SPRs */
        VMSTATE_UINTTL_ARRAY(env.spr, PowerPCCPU, 1024),
        VMSTATE_UINT64(env.spe_acc, PowerPCCPU),

        /* Reservation */
        VMSTATE_UINTTL(env.reserve_addr, PowerPCCPU),

        /* Supervisor mode architected state */
        VMSTATE_UINTTL(env.msr, PowerPCCPU),

        /* Backward compatible internal state */
        VMSTATE_UINTTL(env.hflags_compat_nmsr, PowerPCCPU),

        /* Sanity checking */
        VMSTATE_UINTTL_TEST(mig_msr_mask, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_UINT64_TEST(mig_insns_flags, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_UINT64_TEST(mig_insns_flags2, PowerPCCPU,
                            cpu_pre_2_8_migration),
        VMSTATE_UINT32_TEST(mig_nb_BATs, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_fpu,
        &vmstate_altivec,
        &vmstate_vsx,
        &vmstate_sr,
#ifdef TARGET_PPC64
        &vmstate_tm,
        &vmstate_slb,
#endif /* TARGET_PPC64 */
        &vmstate_tlb6xx,
        &vmstate_tlbemb,
        &vmstate_tlbmas,
        &vmstate_compat,
        NULL
    }
};