qemu/target-ppc/mmu-hash64.c
/*
 *  PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *  Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "exec/log.h"

//#define DEBUG_SLB

#ifdef DEBUG_SLB
#  define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
#  define LOG_SLB(...) do { } while (0)
#endif

/*
 * Used to indicate that a CPU has its hash page table (HPT) managed
 * within the host kernel
 */
#define MMU_HASH64_KVM_MANAGED_HPT      ((void *)-1)

/*
 * SLB handling
 */

static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    uint64_t esid_256M, esid_1T;
    int n;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;

    for (n = 0; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                    PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
        /* We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code. */
        if (((slb->esid == esid_256M) &&
             ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
            || ((slb->esid == esid_1T) &&
                ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
            return slb;
        }
    }

    return NULL;
}

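/*
 * Worked example for slb_lookup() above (assuming SEGMENT_MASK_256M clears
 * the low 28 bits and SEGMENT_MASK_1T the low 40 bits): an access to
 * effective address 0x0000000012345678 in a 256 MB segment is matched
 * against an SLB entry whose esid field is 0x10000000 | SLB_ESID_V; 1 TB
 * segments are matched the same way on their 2^40-byte boundary.
 */
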
void dump_slb(FILE *f, fprintf_function cpu_fprintf, PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t slbe, slbv;

    cpu_synchronize_state(CPU(cpu));

    cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n");
    for (i = 0; i < env->slb_nr; i++) {
        slbe = env->slb[i].esid;
        slbv = env->slb[i].vsid;
        if (slbe == 0 && slbv == 0) {
            continue;
        }
        cpu_fprintf(f, "%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                    i, slbe, slbv);
    }
}

void helper_slbia(CPUPPCState *env)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    int n, do_invalidate;

    do_invalidate = 0;
    /* XXX: Warning: slbia never invalidates the first segment */
    for (n = 1; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        if (slb->esid & SLB_ESID_V) {
            slb->esid &= ~SLB_ESID_V;
            /* XXX: given the fact that segment size is 256 MB or 1TB,
             *      and we still don't have a tlb_flush_mask(env, n, mask)
             *      in QEMU, we just invalidate all TLBs
             */
            do_invalidate = 1;
        }
    }
    if (do_invalidate) {
        tlb_flush(CPU(cpu), 1);
    }
}

void helper_slbie(CPUPPCState *env, target_ulong addr)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    ppc_slb_t *slb;

    slb = slb_lookup(cpu, addr);
    if (!slb) {
        return;
    }

    if (slb->esid & SLB_ESID_V) {
        slb->esid &= ~SLB_ESID_V;

        /* XXX: given the fact that segment size is 256 MB or 1TB,
         *      and we still don't have a tlb_flush_mask(env, n, mask)
         *      in QEMU, we just invalidate all TLBs
         */
        tlb_flush(CPU(cpu), 1);
    }
}

int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
                  target_ulong esid, target_ulong vsid)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb = &env->slb[slot];
    const struct ppc_one_seg_page_size *sps = NULL;
    int i;

    if (slot >= env->slb_nr) {
        return -1; /* Bad slot number */
    }
    if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
        return -1; /* Reserved bits set */
    }
    if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((vsid & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const struct ppc_one_seg_page_size *sps1 = &env->sps.sps[i];

        if (!sps1->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
            sps = sps1;
            break;
        }
    }

    if (!sps) {
        error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
                     " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
                     slot, esid, vsid);
        return -1;
    }

    slb->esid = esid;
    slb->vsid = vsid;
    slb->sps = sps;

    LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
            " %016" PRIx64 "\n", __func__, slot, esid, vsid,
            slb->esid, slb->vsid);

    return 0;
}

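/*
 * Illustrative (hypothetical) call, mirroring helper_store_slb() below: the
 * low 12 bits of RB select the SLB slot, the rest of RB carries ESID|V, and
 * RS supplies the VSID together with the key, class and LLP page-size bits
 * that ppc_store_slb() validates above:
 *
 *     ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs);
 */
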
static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = slb->esid;
    return 0;
}

static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = slb->vsid;
    return 0;
}

void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);

    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
}

target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}

target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}

/*
 * 64-bit hash table MMU handling
 */
void ppc_hash64_set_sdr1(PowerPCCPU *cpu, target_ulong value,
                         Error **errp)
{
    CPUPPCState *env = &cpu->env;
    target_ulong htabsize = value & SDR_64_HTABSIZE;

    env->spr[SPR_SDR1] = value;
    if (htabsize > 28) {
        error_setg(errp,
                   "Invalid HTABSIZE 0x" TARGET_FMT_lx" stored in SDR1",
                   htabsize);
        htabsize = 28;
    }
    env->htab_mask = (1ULL << (htabsize + 18 - 7)) - 1;
    env->htab_base = value & SDR_64_HTABORG;
}

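/*
 * The HTABSIZE field of SDR1 encodes the hash table size as 2^(18 + HTABSIZE)
 * bytes.  Each PTEG is HPTES_PER_GROUP (8) HPTEs of HASH_PTE_SIZE_64 (16)
 * bytes, i.e. 2^7 bytes, so the table holds 2^(11 + HTABSIZE) groups; that is
 * why ppc_hash64_set_sdr1() above builds the PTEG-index mask with the
 * (htabsize + 18 - 7) shift.
 */
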
void ppc_hash64_set_external_hpt(PowerPCCPU *cpu, void *hpt, int shift,
                                 Error **errp)
{
    CPUPPCState *env = &cpu->env;
    Error *local_err = NULL;

    cpu_synchronize_state(CPU(cpu));

    if (hpt) {
        env->external_htab = hpt;
    } else {
        env->external_htab = MMU_HASH64_KVM_MANAGED_HPT;
    }
    ppc_hash64_set_sdr1(cpu, (target_ulong)(uintptr_t)hpt | (shift - 18),
                        &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Not strictly necessary, but makes it clearer that an external
     * htab is in use when debugging */
    env->htab_base = -1;

    if (kvm_enabled()) {
        if (kvmppc_put_books_sregs(cpu) < 0) {
            error_setg(errp, "Unable to update SDR1 in KVM");
        }
    }
}

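/*
 * Note that the SDR1 value constructed above packs the host address of the
 * external HPT into the HTABORG bits and encodes its size as
 * HTABSIZE = shift - 18, i.e. a 2^shift byte table, matching the
 * 2^(18 + HTABSIZE) convention handled by ppc_hash64_set_sdr1().
 */
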
static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
                               ppc_slb_t *slb, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    unsigned pp, key;
    /* Some pp bit combinations have undefined behaviour, so default
     * to no access in those cases */
    int prot = 0;

    key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
             : (slb->vsid & SLB_VSID_KS));
    pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;

        case 0x3:
        case 0x6:
            prot = PAGE_READ;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
        case 0x6:
            prot = 0;
            break;

        case 0x1:
        case 0x3:
            prot = PAGE_READ;
            break;

        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;
        }
    }

    /* No execute if the noexec or guarded bit is set in the PTE, or the
     * segment is marked no-execute */
    if (!(pte.pte1 & HPTE64_R_N) && !(pte.pte1 & HPTE64_R_G)
        && !(slb->vsid & SLB_VSID_N)) {
        prot |= PAGE_EXEC;
    }

    return prot;
}

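/*
 * For reference, the pp/key permission matrix implemented by the switch in
 * ppc_hash64_pte_prot() above (execute permission is handled separately):
 *
 *     pp:      0x0   0x1   0x2   0x3   0x6
 *     key=0:   rw    rw    rw    ro    ro
 *     key=1:   --    ro    rw    ro    --
 */
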
static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    int key, amrbits;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* Only recent MMUs implement Virtual Page Class Key Protection */
    if (!(env->mmu_model & POWERPC_MMU_AMR)) {
        return prot;
    }

    key = HPTE64_R_KEY(pte.pte1);
    amrbits = (env->spr[SPR_AMR] >> 2*(31 - key)) & 0x3;

    /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
    /*         env->spr[SPR_AMR]); */

    /*
     * A store is permitted if the AMR bit is 0. Remove write
     * protection if it is set.
     */
    if (amrbits & 0x2) {
        prot &= ~PAGE_WRITE;
    }
    /*
     * A load is permitted if the AMR bit is 0. Remove read
     * protection if it is set.
     */
    if (amrbits & 0x1) {
        prot &= ~PAGE_READ;
    }

    return prot;
}

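/*
 * The AMR holds one 2-bit (write-disable, read-disable) pair per protection
 * key, with key 0 in the most significant pair; ppc_hash64_amr_prot() above
 * therefore shifts by 2 * (31 - key), so key 31 selects the two least
 * significant bits of the register.
 */
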
uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index)
{
    uint64_t token = 0;
    hwaddr pte_offset;

    pte_offset = pte_index * HASH_PTE_SIZE_64;
    if (cpu->env.external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
        /*
         * HTAB is controlled by KVM. Fetch the PTEG into a new buffer.
         */
        token = kvmppc_hash64_read_pteg(cpu, pte_index);
    } else if (cpu->env.external_htab) {
        /*
         * HTAB is controlled by QEMU. Just point to the internally
         * accessible PTEG.
         */
        token = (uint64_t)(uintptr_t) cpu->env.external_htab + pte_offset;
    } else if (cpu->env.htab_base) {
        token = cpu->env.htab_base + pte_offset;
    }
    return token;
}

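/*
 * The token returned by ppc_hash64_start_access() encodes where the PTEG
 * lives, as consumed by the HPTE load helpers: for a KVM-managed HPT it
 * refers to a PTEG copied out of the kernel, for a QEMU-managed external HPT
 * it is a host pointer into that table, and otherwise it is the guest
 * physical address of the PTEG within htab_base.  A zero token means no hash
 * table has been set up.
 */
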
void ppc_hash64_stop_access(PowerPCCPU *cpu, uint64_t token)
{
    if (cpu->env.external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
        kvmppc_hash64_free_pteg(token);
    }
}

static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
                                     bool secondary, target_ulong ptem,
                                     ppc_hash_pte64_t *pte)
{
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t token;
    target_ulong pte0, pte1;
    target_ulong pte_index;

    pte_index = (hash & env->htab_mask) * HPTES_PER_GROUP;
    token = ppc_hash64_start_access(cpu, pte_index);
    if (!token) {
        return -1;
    }
    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash64_load_hpte0(cpu, token, i);
        pte1 = ppc_hash64_load_hpte1(cpu, token, i);

        if ((pte0 & HPTE64_V_VALID)
            && (secondary == !!(pte0 & HPTE64_V_SECONDARY))
            && HPTE64_V_COMPARE(pte0, ptem)) {
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            ppc_hash64_stop_access(cpu, token);
            return (pte_index + i) * HASH_PTE_SIZE_64;
        }
    }
    ppc_hash64_stop_access(cpu, token);
    /*
     * We didn't find a valid entry.
     */
    return -1;
}

static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
                                     ppc_slb_t *slb, target_ulong eaddr,
                                     ppc_hash_pte64_t *pte)
{
    CPUPPCState *env = &cpu->env;
    hwaddr pte_offset;
    hwaddr hash;
    uint64_t vsid, epnmask, epn, ptem;

    /* The SLB store path should prevent any bad page size encodings
     * getting in there, so: */
    assert(slb->sps);

    epnmask = ~((1ULL << slb->sps->page_shift) - 1);

    if (slb->vsid & SLB_VSID_B) {
        /* 1TB segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
        hash = vsid ^ (vsid << 25) ^ (epn >> slb->sps->page_shift);
    } else {
        /* 256M segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
        hash = vsid ^ (epn >> slb->sps->page_shift);
    }
    ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);

    /* Page address translation */
    qemu_log_mask(CPU_LOG_MMU,
            "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
            " hash " TARGET_FMT_plx "\n",
            env->htab_base, env->htab_mask, hash);

    /* Primary PTEG lookup */
    qemu_log_mask(CPU_LOG_MMU,
            "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
            " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
            " hash=" TARGET_FMT_plx "\n",
            env->htab_base, env->htab_mask, vsid, ptem,  hash);
    pte_offset = ppc_hash64_pteg_search(cpu, hash, 0, ptem, pte);

    if (pte_offset == -1) {
        /* Secondary PTEG lookup */
        qemu_log_mask(CPU_LOG_MMU,
                "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                " hash=" TARGET_FMT_plx "\n", env->htab_base,
                env->htab_mask, vsid, ptem, ~hash);

        pte_offset = ppc_hash64_pteg_search(cpu, ~hash, 1, ptem, pte);
    }

    return pte_offset;
}

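/*
 * To summarise the lookup above: the primary hash is the VSID XORed with the
 * page number within the segment (with an additional vsid << 25 term for
 * 1 TB segments); the primary PTEG is selected by (hash & htab_mask) and the
 * secondary PTEG by its complement (~hash).  ptem carries the abbreviated
 * virtual page number that HPTE64_V_COMPARE() matches against each candidate
 * HPTE in ppc_hash64_pteg_search().
 */
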
static unsigned hpte_page_shift(const struct ppc_one_seg_page_size *sps,
    uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        if (sps->page_shift != 12) {
            /* 4kiB page in a non 4kiB segment */
            return 0;
        }
        /* Normal 4kiB page */
        return 12;
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const struct ppc_one_page_size *ps = &sps->enc[i];
        uint64_t mask;

        if (!ps->page_shift) {
            break;
        }

        if (ps->page_shift == 12) {
            /* L bit is set so this can't be a 4kiB page */
            continue;
        }

        mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;

        if ((pte1 & mask) == (ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
            return ps->page_shift;
        }
    }

    return 0; /* Bad page size encoding */
}

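/*
 * Page sizes are thus encoded twice: the segment's base page size comes from
 * the SLB LLP bits (captured in sps), while an HPTE64_V_LARGE HPTE encodes
 * the actual page size in the low bits of its RPN field.  hpte_page_shift()
 * above matches those bits against each pte_enc the segment supports and
 * returns 0 when the combination is invalid.
 */
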
unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
                                          uint64_t pte0, uint64_t pte1,
                                          unsigned *seg_page_shift)
{
    CPUPPCState *env = &cpu->env;
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        *seg_page_shift = 12;
        return 12;
    }

    /*
     * The encodings in env->sps need to be carefully chosen so that
     * this gives an unambiguous result.
     */
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const struct ppc_one_seg_page_size *sps = &env->sps.sps[i];
        unsigned shift;

        if (!sps->page_shift) {
            break;
        }

        shift = hpte_page_shift(sps, pte0, pte1);
        if (shift) {
            *seg_page_shift = sps->page_shift;
            return shift;
        }
    }

    *seg_page_shift = 0;
    return 0;
}

int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
                                int rwx, int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    unsigned apshift;
    hwaddr pte_offset;
    ppc_hash_pte64_t pte;
    int pp_prot, amr_prot, prot;
    uint64_t new_pte1;
    const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
    hwaddr raddr;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));

    /* 1. Handle real mode accesses */
    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
        /* Translation is off */
        /* In real mode the top 4 effective address bits are ignored */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* 2. Translation is on, so look up the SLB */
    slb = slb_lookup(cpu, eaddr);

    if (!slb) {
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISEG;
            env->error_code = 0;
        } else {
            cs->exception_index = POWERPC_EXCP_DSEG;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
        }
        return 1;
    }

    /* 3. Check for segment level no-execute violation */
    if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = 0x10000000;
        return 1;
    }

    /* 4. Locate the PTE in the hash table */
    pte_offset = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte);
    if (pte_offset == -1) {
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x40000000;
        } else {
            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            if (rwx == 1) {
                env->spr[SPR_DSISR] = 0x42000000;
            } else {
                env->spr[SPR_DSISR] = 0x40000000;
            }
        }
        return 1;
    }
    qemu_log_mask(CPU_LOG_MMU,
                "found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);

    /* Validate page size encoding */
    apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1);
    if (!apshift) {
        error_report("Bad page size encoding in HPTE 0x%"PRIx64" - 0x%"PRIx64
                     " @ 0x%"HWADDR_PRIx, pte.pte0, pte.pte1, pte_offset);
        /* Not entirely sure what the right action is here, but a machine
         * check seems reasonable */
        cs->exception_index = POWERPC_EXCP_MCHECK;
        env->error_code = 0;
        return 1;
    }

    /* 5. Check access permissions */

    pp_prot = ppc_hash64_pte_prot(cpu, slb, pte);
    amr_prot = ppc_hash64_amr_prot(cpu, pte);
    prot = pp_prot & amr_prot;

    if ((need_prot[rwx] & ~prot) != 0) {
        /* Access right violation */
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x08000000;
        } else {
            target_ulong dsisr = 0;

            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            if (need_prot[rwx] & ~pp_prot) {
                dsisr |= 0x08000000;
            }
            if (rwx == 1) {
                dsisr |= 0x02000000;
            }
            if (need_prot[rwx] & ~amr_prot) {
                dsisr |= 0x00200000;
            }
            env->spr[SPR_DSISR] = dsisr;
        }
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "PTE access granted!\n");

    /* 6. Update PTE referenced and changed bits if necessary */

    new_pte1 = pte.pte1 | HPTE64_R_R; /* set referenced bit */
    if (rwx == 1) {
        new_pte1 |= HPTE64_R_C; /* set changed (dirty) bit */
    } else {
        /* Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit */
        prot &= ~PAGE_WRITE;
    }

    if (new_pte1 != pte.pte1) {
        ppc_hash64_store_hpte(cpu, pte_offset / HASH_PTE_SIZE_64,
                              pte.pte0, new_pte1);
    }

    /* 7. Determine the real address from the PTE */

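    /*
     * deposit64() below replaces the low 'apshift' bits of the real page
     * number taken from the PTE with the corresponding byte-offset bits of
     * the effective address, yielding the real address of the access.
     */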
    raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, 1ULL << apshift);

    return 0;
}

hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    hwaddr pte_offset;
    ppc_hash_pte64_t pte;
    unsigned apshift;

    if (msr_dr == 0) {
        /* In real mode the top 4 effective address bits are ignored */
        return addr & 0x0FFFFFFFFFFFFFFFULL;
    }

    slb = slb_lookup(cpu, addr);
    if (!slb) {
        return -1;
    }

    pte_offset = ppc_hash64_htab_lookup(cpu, slb, addr, &pte);
    if (pte_offset == -1) {
        return -1;
    }

    apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1);
    if (!apshift) {
        return -1;
    }

    return deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, addr)
        & TARGET_PAGE_MASK;
}

void ppc_hash64_store_hpte(PowerPCCPU *cpu,
                           target_ulong pte_index,
                           target_ulong pte0, target_ulong pte1)
{
    CPUPPCState *env = &cpu->env;

    if (env->external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
        kvmppc_hash64_write_pte(env, pte_index, pte0, pte1);
        return;
    }

    pte_index *= HASH_PTE_SIZE_64;
    if (env->external_htab) {
        stq_p(env->external_htab + pte_index, pte0);
        stq_p(env->external_htab + pte_index + HASH_PTE_SIZE_64 / 2, pte1);
    } else {
        stq_phys(CPU(cpu)->as, env->htab_base + pte_index, pte0);
        stq_phys(CPU(cpu)->as,
                 env->htab_base + pte_index + HASH_PTE_SIZE_64 / 2, pte1);
    }
}

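/*
 * Each HPTE is two doublewords: pte0 (the "V" word) at byte offset 0 and
 * pte1 (the "R" word) at HASH_PTE_SIZE_64 / 2, which is the layout
 * ppc_hash64_store_hpte() writes back to either the QEMU-allocated external
 * table or to guest memory at htab_base.
 */
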
void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
                               target_ulong pte_index,
                               target_ulong pte0, target_ulong pte1)
{
    /*
     * XXX: given the fact that there are too many segments to
     * invalidate, and we still don't have a tlb_flush_mask(env, n,
     * mask) in QEMU, we just invalidate all TLBs
     */
    tlb_flush(CPU(cpu), 1);
}