/*
 *  PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *  Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "sysemu/hw_accel.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "exec/log.h"
#include "hw/hw.h"
#include "mmu-book3s-v3.h"

//#define DEBUG_SLB

#ifdef DEBUG_SLB
#  define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
#  define LOG_SLB(...) do { } while (0)
#endif

/*
 * SLB handling
 */

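/*
 * Return the valid SLB entry, if any, that translates the given
 * effective address.  Both 256MiB and 1TiB segment matches are tried;
 * the entry's B field tells us which segment size it describes.
 */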
static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    uint64_t esid_256M, esid_1T;
    int n;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;

    for (n = 0; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
        /* We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code. */
        if (((slb->esid == esid_256M) &&
             ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
            || ((slb->esid == esid_1T) &&
                ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
            return slb;
        }
    }

    return NULL;
}

void dump_slb(FILE *f, fprintf_function cpu_fprintf, PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t slbe, slbv;

    cpu_synchronize_state(CPU(cpu));

    cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n");
    for (i = 0; i < env->slb_nr; i++) {
        slbe = env->slb[i].esid;
        slbv = env->slb[i].vsid;
        if (slbe == 0 && slbv == 0) {
            continue;
        }
        cpu_fprintf(f, "%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                    i, slbe, slbv);
    }
}

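/*
 * slbia: invalidate (almost) all SLB entries and flush any cached
 * translations that may have depended on them.
 */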
void helper_slbia(CPUPPCState *env)
{
    int n;

    /* XXX: Warning: slbia never invalidates the first segment */
    for (n = 1; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        if (slb->esid & SLB_ESID_V) {
            slb->esid &= ~SLB_ESID_V;
            /* XXX: given the fact that segment size is 256 MB or 1TB,
             *      and we still don't have a tlb_flush_mask(env, n, mask)
             *      in QEMU, we just invalidate all TLBs
             */
            env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
        }
    }
}

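/*
 * Common implementation of slbie and slbieg: invalidate the SLB entry
 * matching addr, if any, and request a local or global TLB flush.
 */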
static void __helper_slbie(CPUPPCState *env, target_ulong addr,
                           target_ulong global)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    ppc_slb_t *slb;

    slb = slb_lookup(cpu, addr);
    if (!slb) {
        return;
    }

    if (slb->esid & SLB_ESID_V) {
        slb->esid &= ~SLB_ESID_V;

        /* XXX: given the fact that segment size is 256 MB or 1TB,
         *      and we still don't have a tlb_flush_mask(env, n, mask)
         *      in QEMU, we just invalidate all TLBs
         */
        env->tlb_need_flush |=
            global ? TLB_NEED_GLOBAL_FLUSH : TLB_NEED_LOCAL_FLUSH;
    }
}

void helper_slbie(CPUPPCState *env, target_ulong addr)
{
    __helper_slbie(env, addr, false);
}

void helper_slbieg(CPUPPCState *env, target_ulong addr)
{
    __helper_slbie(env, addr, true);
}

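/*
 * Validate and install an SLB entry.  Returns 0 on success, or -1 for
 * a bad slot number, reserved bits in the ESID/VSID, or an unsupported
 * segment size or page size encoding.
 */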
int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
                  target_ulong esid, target_ulong vsid)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    const struct ppc_one_seg_page_size *sps = NULL;
    int i;

    if (slot >= env->slb_nr) {
        return -1; /* Bad slot number */
    }
    slb = &env->slb[slot];
    if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
        return -1; /* Reserved bits set */
    }
    if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((vsid & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const struct ppc_one_seg_page_size *sps1 = &env->sps.sps[i];

        if (!sps1->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
            sps = sps1;
            break;
        }
    }

    if (!sps) {
        error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
                     " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
                     slot, esid, vsid);
        return -1;
    }

    slb->esid = esid;
    slb->vsid = vsid;
    slb->sps = sps;

    LOG_SLB("%s: " TARGET_FMT_lu " " TARGET_FMT_lx " - " TARGET_FMT_lx
            " => %016" PRIx64 " %016" PRIx64 "\n", __func__, slot, esid, vsid,
            slb->esid, slb->vsid);

    return 0;
}

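/* Read back the ESID half of an SLB entry (slbmfee). */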
static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb;

    if (slot >= env->slb_nr) {
        return -1;
    }
    slb = &env->slb[slot];

    *rt = slb->esid;
    return 0;
}

static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb;

    if (slot >= env->slb_nr) {
        return -1;
    }
    slb = &env->slb[slot];

    *rt = slb->vsid;
    return 0;
}

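/*
 * Return via *rt the VSID of the SLB entry translating rb, or -1 if
 * no entry matches.
 */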
static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;

    if (!msr_is_64bit(env, env->msr)) {
        rb &= 0xffffffff;
    }
    slb = slb_lookup(cpu, rb);
    if (slb == NULL) {
        *rt = (target_ulong)-1ul;
    } else {
        *rt = slb->vsid;
    }
    return 0;
}

void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);

    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
}

target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}

target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}

target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}

/* Check No-Execute or Guarded Storage */
static inline int ppc_hash64_pte_noexec_guard(PowerPCCPU *cpu,
                                              ppc_hash_pte64_t pte)
{
    /* Exec permissions CANNOT take away read or write permissions */
    return (pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G) ?
            PAGE_READ | PAGE_WRITE : PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}

/* Check Basic Storage Protection */
static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
                               ppc_slb_t *slb, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    unsigned pp, key;
    /* Some pp bit combinations have undefined behaviour, so default
     * to no access in those cases */
    int prot = 0;

    key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
             : (slb->vsid & SLB_VSID_KS));
    pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;

        case 0x3:
        case 0x6:
            prot = PAGE_READ | PAGE_EXEC;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
        case 0x6:
            break;

        case 0x1:
        case 0x3:
            prot = PAGE_READ | PAGE_EXEC;
            break;

        case 0x2:
            prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        }
    }

    return prot;
}

/* Check the instruction access permissions specified in the IAMR */
static int ppc_hash64_iamr_prot(PowerPCCPU *cpu, int key)
{
    CPUPPCState *env = &cpu->env;
    int iamr_bits = (env->spr[SPR_IAMR] >> 2 * (31 - key)) & 0x3;

    /*
     * An instruction fetch is permitted if the IAMR bit is 0.
     * If the bit is set, return PAGE_READ | PAGE_WRITE because this bit
     * can only take away EXEC permissions not READ or WRITE permissions.
     * If bit is cleared return PAGE_READ | PAGE_WRITE | PAGE_EXEC since
     * EXEC permissions are allowed.
     */
    return (iamr_bits & 0x1) ? PAGE_READ | PAGE_WRITE :
                               PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}

static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    int key, amrbits;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* Only recent MMUs implement Virtual Page Class Key Protection */
    if (!(env->mmu_model & POWERPC_MMU_AMR)) {
        return prot;
    }

    key = HPTE64_R_KEY(pte.pte1);
    amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3;

    /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
    /*         env->spr[SPR_AMR]); */

    /*
     * A store is permitted if the AMR bit is 0. Remove write
     * protection if it is set.
     */
    if (amrbits & 0x2) {
        prot &= ~PAGE_WRITE;
    }
    /*
     * A load is permitted if the AMR bit is 0. Remove read
     * protection if it is set.
     */
    if (amrbits & 0x1) {
        prot &= ~PAGE_READ;
    }

    switch (env->mmu_model) {
    /*
     * MMU version 2.07 and later support IAMR.  Mask with the IAMR
     * permissions: if the IAMR denies execution, the mask it returns
     * lacks PAGE_EXEC and the AND below clears that bit; otherwise
     * prot is left unchanged, indicating execution support.
     */
    case POWERPC_MMU_2_07:
    case POWERPC_MMU_3_00:
        prot &= ppc_hash64_iamr_prot(cpu, key);
        break;
    default:
        break;
    }

    return prot;
}

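/*
 * Map n consecutive HPTEs, starting at index ptex, for reading.  With
 * a virtual hypervisor the mapping is delegated to it; otherwise the
 * entries are mapped straight out of the guest's hash table.  The
 * mapping must be released with ppc_hash64_unmap_hptes().
 */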
const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
                                             hwaddr ptex, int n)
{
    hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
    hwaddr base = ppc_hash64_hpt_base(cpu);
    hwaddr plen = n * HASH_PTE_SIZE_64;
    const ppc_hash_pte64_t *hptes;

    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        return vhc->map_hptes(cpu->vhyp, ptex, n);
    }

    if (!base) {
        return NULL;
    }

    hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false);
    if (plen < (n * HASH_PTE_SIZE_64)) {
        hw_error("%s: Unable to map all requested HPTEs\n", __func__);
    }
    return hptes;
}

void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes,
                            hwaddr ptex, int n)
{
    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->unmap_hptes(cpu->vhyp, hptes, ptex, n);
        return;
    }

    address_space_unmap(CPU(cpu)->as, (void *)hptes, n * HASH_PTE_SIZE_64,
                        false, n * HASH_PTE_SIZE_64);
}

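/*
 * Decode the actual page size of a (pte0, pte1) pair from the page
 * size encodings valid for the containing segment.  Returns the page
 * shift, or 0 if the encoding is invalid.
 */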
static unsigned hpte_page_shift(const struct ppc_one_seg_page_size *sps,
                                uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        if (sps->page_shift != 12) {
            /* 4kiB page in a non 4kiB segment */
            return 0;
        }
        /* Normal 4kiB page */
        return 12;
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const struct ppc_one_page_size *ps = &sps->enc[i];
        uint64_t mask;

        if (!ps->page_shift) {
            break;
        }

        if (ps->page_shift == 12) {
            /* L bit is set so this can't be a 4kiB page */
            continue;
        }

        mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;

        if ((pte1 & mask) == ((uint64_t)ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
            return ps->page_shift;
        }
    }

    return 0; /* Bad page size encoding */
}

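/*
 * Scan one PTEG (a group of HPTES_PER_GROUP HPTEs) for an entry whose
 * V, B, H and AVPN fields match ptem.  On a hit, fill *pte and
 * *pshift and return the PTE's index; return -1 on a miss.
 */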
static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
                                     const struct ppc_one_seg_page_size *sps,
                                     target_ulong ptem,
                                     ppc_hash_pte64_t *pte, unsigned *pshift)
{
    int i;
    const ppc_hash_pte64_t *pteg;
    target_ulong pte0, pte1;
    target_ulong ptex;

    ptex = (hash & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP;
    pteg = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
    if (!pteg) {
        return -1;
    }
    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash64_hpte0(cpu, pteg, i);
        pte1 = ppc_hash64_hpte1(cpu, pteg, i);

        /* This compares V, B, H (secondary) and the AVPN */
        if (HPTE64_V_COMPARE(pte0, ptem)) {
            *pshift = hpte_page_shift(sps, pte0, pte1);
            /*
             * If there is no match, ignore the PTE, it could simply
             * be for a different segment size encoding and the
             * architecture specifies we should not match. Linux will
             * potentially leave behind PTEs for the wrong base page
             * size when demoting segments.
             */
            if (*pshift == 0) {
                continue;
            }
            /* We don't do anything with pshift yet as qemu TLB only deals
             * with 4K pages anyway
             */
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
            return ptex + i;
        }
    }
    ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
    /*
     * We didn't find a valid entry.
     */
    return -1;
}

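/*
 * Translate an effective address into a HPTE index, using the ISA
 * hash scheme implemented below:
 *
 *   256MiB segment:  hash = vsid ^ (epn >> page_shift)
 *   1TiB segment:    hash = vsid ^ (vsid << 25) ^ (epn >> page_shift)
 *
 * The primary PTEG is searched first; on a miss, the secondary PTEG
 * at ~hash is searched with HPTE64_V_SECONDARY set in the match value.
 */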
static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
                                     ppc_slb_t *slb, target_ulong eaddr,
                                     ppc_hash_pte64_t *pte, unsigned *pshift)
{
    CPUPPCState *env = &cpu->env;
    hwaddr hash, ptex;
    uint64_t vsid, epnmask, epn, ptem;
    const struct ppc_one_seg_page_size *sps = slb->sps;

    /* The SLB store path should prevent any bad page size encodings
     * getting in there, so: */
    assert(sps);

    /* If ISL is set in LPCR we need to clamp the page size to 4K */
    if (env->spr[SPR_LPCR] & LPCR_ISL) {
        /* We assume that when using TCG, 4k is the first entry of SPS */
        sps = &env->sps.sps[0];
        assert(sps->page_shift == 12);
    }

    epnmask = ~((1ULL << sps->page_shift) - 1);

    if (slb->vsid & SLB_VSID_B) {
        /* 1TB segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
        hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift);
    } else {
        /* 256M segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
        hash = vsid ^ (epn >> sps->page_shift);
    }
    ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
    ptem |= HPTE64_V_VALID;

    /* Page address translation */
    qemu_log_mask(CPU_LOG_MMU,
            "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
            " hash " TARGET_FMT_plx "\n",
            ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), hash);

    /* Primary PTEG lookup */
    qemu_log_mask(CPU_LOG_MMU,
            "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
            " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
            " hash=" TARGET_FMT_plx "\n",
            ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu),
            vsid, ptem, hash);
    ptex = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift);

    if (ptex == -1) {
        /* Secondary PTEG lookup */
        ptem |= HPTE64_V_SECONDARY;
        qemu_log_mask(CPU_LOG_MMU,
                "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                " hash=" TARGET_FMT_plx "\n", ppc_hash64_hpt_base(cpu),
                ppc_hash64_hpt_mask(cpu), vsid, ptem, ~hash);

        ptex = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift);
    }

    return ptex;
}

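/*
 * As above, but with no SLB entry to narrow down the candidate page
 * sizes: try every segment page size the CPU supports until one
 * decodes cleanly.
 */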
unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
                                          uint64_t pte0, uint64_t pte1)
{
    CPUPPCState *env = &cpu->env;
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        return 12;
    }

    /*
     * The encodings in env->sps need to be carefully chosen so that
     * this gives an unambiguous result.
     */
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const struct ppc_one_seg_page_size *sps = &env->sps.sps[i];
        unsigned shift;

        if (!sps->page_shift) {
            break;
        }

        shift = hpte_page_shift(sps, pte0, pte1);
        if (shift) {
            return shift;
        }
    }

    return 0;
}

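/*
 * Deliver an instruction storage interrupt, routing it to the
 * hypervisor variant (HISI) or the normal one (ISI) according to the
 * LPCR VPM bits and the current MSR[HV] state.  ppc_hash64_set_dsi()
 * below does the same for data storage interrupts.
 */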
static void ppc_hash64_set_isi(CPUState *cs, CPUPPCState *env,
                               uint64_t error_code)
{
    bool vpm;

    if (msr_ir) {
        vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
    } else {
        switch (env->mmu_model) {
        case POWERPC_MMU_3_00:
            /* Field deprecated in ISAv3.00 - interrupts always go to hyperv */
            vpm = true;
            break;
        default:
            vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
            break;
        }
    }
    if (vpm && !msr_hv) {
        cs->exception_index = POWERPC_EXCP_HISI;
    } else {
        cs->exception_index = POWERPC_EXCP_ISI;
    }
    env->error_code = error_code;
}

static void ppc_hash64_set_dsi(CPUState *cs, CPUPPCState *env, uint64_t dar,
                               uint64_t dsisr)
{
    bool vpm;

    if (msr_dr) {
        vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
    } else {
        switch (env->mmu_model) {
        case POWERPC_MMU_3_00:
            /* Field deprecated in ISAv3.00 - interrupts always go to hyperv */
            vpm = true;
            break;
        default:
            vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
            break;
        }
    }
    if (vpm && !msr_hv) {
        cs->exception_index = POWERPC_EXCP_HDSI;
        env->spr[SPR_HDAR] = dar;
        env->spr[SPR_HDSISR] = dsisr;
    } else {
        cs->exception_index = POWERPC_EXCP_DSI;
        env->spr[SPR_DAR] = dar;
        env->spr[SPR_DSISR] = dsisr;
    }
    env->error_code = 0;
}

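/*
 * Top-level fault handler for hash MMU translation.  rwx is 0 for a
 * data load, 1 for a data store and 2 for an instruction fetch.
 * Returns 0 once the translation has been entered into the QEMU TLB,
 * or 1 when an exception has been set up instead.
 */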
int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
                                int rwx, int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    unsigned apshift;
    hwaddr ptex;
    ppc_hash_pte64_t pte;
    int exec_prot, pp_prot, amr_prot, prot;
    uint64_t new_pte1;
    const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
    hwaddr raddr;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));

    /* Note on LPCR usage: 970 uses HID4, but our special variant
     * of store_spr copies relevant fields into env->spr[SPR_LPCR].
     * Similarly we filter unimplemented bits when storing into
     * LPCR depending on the MMU version. This code can thus just
     * use the LPCR "as-is".
     */

    /* 1. Handle real mode accesses */
    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
        /* Translation is supposedly "off" */
        /* In real mode the top 4 effective address bits are (mostly) ignored */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        /* In HV mode, add HRMOR if top EA bit is clear */
        if (msr_hv || !env->has_hv_mode) {
            if (!(eaddr >> 63)) {
                raddr |= env->spr[SPR_HRMOR];
            }
        } else {
            /* Otherwise, check VPM for RMA vs VRMA */
            if (env->spr[SPR_LPCR] & LPCR_VPM0) {
                slb = &env->vrma_slb;
                if (slb->sps) {
                    goto skip_slb_search;
                }
                /* Not much else to do here */
                cs->exception_index = POWERPC_EXCP_MCHECK;
                env->error_code = 0;
                return 1;
            } else if (raddr < env->rmls) {
                /* RMA. Check bounds in RMLS */
                raddr |= env->spr[SPR_RMOR];
            } else {
                /* The access failed, generate the appropriate interrupt */
                if (rwx == 2) {
                    ppc_hash64_set_isi(cs, env, SRR1_PROTFAULT);
                } else {
                    int dsisr = DSISR_PROTFAULT;
                    if (rwx == 1) {
                        dsisr |= DSISR_ISSTORE;
                    }
                    ppc_hash64_set_dsi(cs, env, eaddr, dsisr);
                }
                return 1;
            }
        }
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* 2. Translation is on, so look up the SLB */
    slb = slb_lookup(cpu, eaddr);
    if (!slb) {
        /* No entry found, check if in-memory segment tables are in use */
        if ((env->mmu_model & POWERPC_MMU_V3) && ppc64_use_proc_tbl(cpu)) {
            /* TODO - Unsupported */
            error_report("Segment Table Support Unimplemented");
            exit(1);
        }
        /* Segment still not found, generate the appropriate interrupt */
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISEG;
            env->error_code = 0;
        } else {
            cs->exception_index = POWERPC_EXCP_DSEG;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
        }
        return 1;
    }

skip_slb_search:

    /* 3. Check for segment level no-execute violation */
    if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
        ppc_hash64_set_isi(cs, env, SRR1_NOEXEC_GUARD);
        return 1;
    }

    /* 4. Locate the PTE in the hash table */
    ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift);
    if (ptex == -1) {
        if (rwx == 2) {
            ppc_hash64_set_isi(cs, env, SRR1_NOPTE);
        } else {
            int dsisr = DSISR_NOPTE;
            if (rwx == 1) {
                dsisr |= DSISR_ISSTORE;
            }
            ppc_hash64_set_dsi(cs, env, eaddr, dsisr);
        }
        return 1;
    }
    qemu_log_mask(CPU_LOG_MMU,
                  "found PTE at index %08" HWADDR_PRIx "\n", ptex);

    /* 5. Check access permissions */

    exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte);
    pp_prot = ppc_hash64_pte_prot(cpu, slb, pte);
    amr_prot = ppc_hash64_amr_prot(cpu, pte);
    prot = exec_prot & pp_prot & amr_prot;

    if ((need_prot[rwx] & ~prot) != 0) {
        /* Access right violation */
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        if (rwx == 2) {
            int srr1 = 0;
            if (PAGE_EXEC & ~exec_prot) {
                srr1 |= SRR1_NOEXEC_GUARD; /* Access violates noexec or guard */
            } else if (PAGE_EXEC & ~pp_prot) {
                srr1 |= SRR1_PROTFAULT; /* Access violates access authority */
            }
            if (PAGE_EXEC & ~amr_prot) {
                srr1 |= SRR1_IAMR; /* Access violates virt pg class key prot */
            }
            ppc_hash64_set_isi(cs, env, srr1);
        } else {
            int dsisr = 0;
            if (need_prot[rwx] & ~pp_prot) {
                dsisr |= DSISR_PROTFAULT;
            }
            if (rwx == 1) {
                dsisr |= DSISR_ISSTORE;
            }
            if (need_prot[rwx] & ~amr_prot) {
                dsisr |= DSISR_AMR;
            }
            ppc_hash64_set_dsi(cs, env, eaddr, dsisr);
        }
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "PTE access granted!\n");

    /* 6. Update PTE referenced and changed bits if necessary */

    new_pte1 = pte.pte1 | HPTE64_R_R; /* set referenced bit */
    if (rwx == 1) {
        new_pte1 |= HPTE64_R_C; /* set changed (dirty) bit */
    } else {
        /* Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit */
        prot &= ~PAGE_WRITE;
    }

    if (new_pte1 != pte.pte1) {
        ppc_hash64_store_hpte(cpu, ptex, pte.pte0, new_pte1);
    }

    /* 7. Determine the real address from the PTE */

    raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, 1ULL << apshift);

    return 0;
}

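/*
 * Debug translation (used, for instance, by the gdb stub and the
 * monitor): same walk as the fault path above, but with no PTE
 * updates and no exceptions; returns -1 if the address does not
 * translate.
 */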
hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    hwaddr ptex, raddr;
    ppc_hash_pte64_t pte;
    unsigned apshift;

    /* Handle real mode */
    if (msr_dr == 0) {
        /* In real mode the top 4 effective address bits are ignored */
        raddr = addr & 0x0FFFFFFFFFFFFFFFULL;

        /* In HV mode, add HRMOR if top EA bit is clear */
        if ((msr_hv || !env->has_hv_mode) && !(addr >> 63)) {
            return raddr | env->spr[SPR_HRMOR];
        }

        /* Otherwise, check VPM for RMA vs VRMA */
        if (env->spr[SPR_LPCR] & LPCR_VPM0) {
            slb = &env->vrma_slb;
            if (!slb->sps) {
                return -1;
            }
        } else if (raddr < env->rmls) {
            /* RMA. Check bounds in RMLS */
            return raddr | env->spr[SPR_RMOR];
        } else {
            return -1;
        }
    } else {
        slb = slb_lookup(cpu, addr);
        if (!slb) {
            return -1;
        }
    }

    ptex = ppc_hash64_htab_lookup(cpu, slb, addr, &pte, &apshift);
    if (ptex == -1) {
        return -1;
    }

    return deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, addr)
        & TARGET_PAGE_MASK;
}

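/*
 * Write back both halves of a HPTE, either through the virtual
 * hypervisor or directly into the guest's hash table in memory.
 */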
void ppc_hash64_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
                           uint64_t pte0, uint64_t pte1)
{
    hwaddr base = ppc_hash64_hpt_base(cpu);
    hwaddr offset = ptex * HASH_PTE_SIZE_64;

    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->store_hpte(cpu->vhyp, ptex, pte0, pte1);
        return;
    }

    stq_phys(CPU(cpu)->as, base + offset, pte0);
    stq_phys(CPU(cpu)->as, base + offset + HASH_PTE_SIZE_64 / 2, pte1);
}

void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
                               target_ulong pte0, target_ulong pte1)
{
    /*
     * XXX: given the fact that there are too many segments to
     * invalidate, and we still don't have a tlb_flush_mask(env, n,
     * mask) in QEMU, we just invalidate all TLBs
     */
    cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
}

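/*
 * Recompute the real mode area limit (env->rmls, in bytes) from the
 * LPCR[RMLS] field.
 */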
void ppc_hash64_update_rmls(CPUPPCState *env)
{
    uint64_t lpcr = env->spr[SPR_LPCR];

    /*
     * This is the full 4 bits encoding of POWER8. Previous
     * CPUs only support a subset of these but the filtering
     * is done when writing LPCR
     */
    switch ((lpcr & LPCR_RMLS) >> LPCR_RMLS_SHIFT) {
    case 0x8: /* 32MB */
        env->rmls = 0x2000000ull;
        break;
    case 0x3: /* 64MB */
        env->rmls = 0x4000000ull;
        break;
    case 0x7: /* 128MB */
        env->rmls = 0x8000000ull;
        break;
    case 0x4: /* 256MB */
        env->rmls = 0x10000000ull;
        break;
    case 0x2: /* 1GB */
        env->rmls = 0x40000000ull;
        break;
    case 0x1: /* 16GB */
        env->rmls = 0x400000000ull;
        break;
    default:
        /* What to do here ??? */
        env->rmls = 0;
    }
}

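/*
 * Recompute the shadow SLB entry used for Virtual Real Mode Area
 * (VRMA) translation whenever LPCR changes.
 */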
void ppc_hash64_update_vrma(CPUPPCState *env)
{
    const struct ppc_one_seg_page_size *sps = NULL;
    target_ulong esid, vsid, lpcr;
    ppc_slb_t *slb = &env->vrma_slb;
    uint32_t vrmasd;
    int i;

    /* First clear it */
    slb->esid = slb->vsid = 0;
    slb->sps = NULL;

    /* Is VRMA enabled? */
    lpcr = env->spr[SPR_LPCR];
    if (!(lpcr & LPCR_VPM0)) {
        return;
    }

    /* Make one up. Mostly ignore the ESID which will not be
     * needed for translation
     */
    vsid = SLB_VSID_VRMA;
    vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
    vsid |= (vrmasd << 4) & (SLB_VSID_L | SLB_VSID_LP);
    esid = SLB_ESID_V;

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const struct ppc_one_seg_page_size *sps1 = &env->sps.sps[i];

        if (!sps1->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
            sps = sps1;
            break;
        }
    }

    if (!sps) {
        error_report("Bad page size encoding esid 0x"TARGET_FMT_lx
                     " vsid 0x"TARGET_FMT_lx, esid, vsid);
        return;
    }

    slb->vsid = vsid;
    slb->esid = esid;
    slb->sps = sps;
}

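/*
 * Store to LPCR: keep only the bits the emulated MMU version actually
 * implements, then recompute the derived RMLS and VRMA state.
 */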
void helper_store_lpcr(CPUPPCState *env, target_ulong val)
{
    uint64_t lpcr = 0;

    /* Filter out bits */
    switch (POWERPC_MMU_VER(env->mmu_model)) {
    case POWERPC_MMU_VER_64B: /* 970 */
        if (val & 0x40) {
            lpcr |= LPCR_LPES0;
        }
        if (val & 0x8000000000000000ull) {
            lpcr |= LPCR_LPES1;
        }
        if (val & 0x20) {
            lpcr |= (0x4ull << LPCR_RMLS_SHIFT);
        }
        if (val & 0x4000000000000000ull) {
            lpcr |= (0x2ull << LPCR_RMLS_SHIFT);
        }
        if (val & 0x2000000000000000ull) {
            lpcr |= (0x1ull << LPCR_RMLS_SHIFT);
        }
        env->spr[SPR_RMOR] = ((lpcr >> 41) & 0xffffull) << 26;

        /* XXX We could also write LPID from HID4 here
         * but since we don't tag any translation on it
         * it doesn't actually matter
         */
        /* XXX For proper emulation of 970 we also need
         * to dig HRMOR out of HID5
         */
        break;
    case POWERPC_MMU_VER_2_03: /* P5p */
        lpcr = val & (LPCR_RMLS | LPCR_ILE |
                      LPCR_LPES0 | LPCR_LPES1 |
                      LPCR_RMI | LPCR_HDICE);
        break;
    case POWERPC_MMU_VER_2_06: /* P7 */
        lpcr = val & (LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_DPFD |
                      LPCR_VRMASD | LPCR_RMLS | LPCR_ILE |
                      LPCR_P7_PECE0 | LPCR_P7_PECE1 | LPCR_P7_PECE2 |
                      LPCR_MER | LPCR_TC |
                      LPCR_LPES0 | LPCR_LPES1 | LPCR_HDICE);
        break;
    case POWERPC_MMU_VER_2_07: /* P8 */
        lpcr = val & (LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_KBV |
                      LPCR_DPFD | LPCR_VRMASD | LPCR_RMLS | LPCR_ILE |
                      LPCR_AIL | LPCR_ONL | LPCR_P8_PECE0 | LPCR_P8_PECE1 |
                      LPCR_P8_PECE2 | LPCR_P8_PECE3 | LPCR_P8_PECE4 |
                      LPCR_MER | LPCR_TC | LPCR_LPES0 | LPCR_HDICE);
        break;
    case POWERPC_MMU_VER_3_00: /* P9 */
        lpcr = val & (LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD |
                      (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL |
                      LPCR_UPRT | LPCR_EVIRT | LPCR_ONL |
                      (LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE |
                      LPCR_DEE | LPCR_OEE)) | LPCR_MER | LPCR_GTSE | LPCR_TC |
                      LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE);
        break;
    default:
        ;
    }
    env->spr[SPR_LPCR] = lpcr;
    ppc_hash64_update_rmls(env);
    ppc_hash64_update_vrma(env);
}