qemu/target/ppc/mmu-radix64.c
/*
 *  PowerPC Radix MMU emulation helpers for QEMU.
 *
 *  Copyright (c) 2016 Suraj Jitindar Singh, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "exec/log.h"
#include "internal.h"
#include "mmu-radix64.h"
#include "mmu-book3s-v3.h"

static bool ppc_radix64_get_fully_qualified_addr(const CPUPPCState *env,
                                                 vaddr eaddr,
                                                 uint64_t *lpid, uint64_t *pid)
{
    /* When EA(2:11) are nonzero, raise a segment interrupt */
    if (eaddr & ~R_EADDR_VALID_MASK) {
        return false;
    }

    if (FIELD_EX64(env->msr, MSR, HV)) { /* MSR[HV] -> Hypervisor/bare metal */
        switch (eaddr & R_EADDR_QUADRANT) {
        case R_EADDR_QUADRANT0:
            *lpid = 0;
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT1:
            *lpid = env->spr[SPR_LPIDR];
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT2:
            *lpid = env->spr[SPR_LPIDR];
            *pid = 0;
            break;
        case R_EADDR_QUADRANT3:
            *lpid = 0;
            *pid = 0;
            break;
        default:
            g_assert_not_reached();
        }
    } else {  /* !MSR[HV] -> Guest */
        switch (eaddr & R_EADDR_QUADRANT) {
        case R_EADDR_QUADRANT0: /* Guest application */
            *lpid = env->spr[SPR_LPIDR];
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT1: /* Illegal */
        case R_EADDR_QUADRANT2:
            return false;
        case R_EADDR_QUADRANT3: /* Guest OS */
            *lpid = env->spr[SPR_LPIDR];
            *pid = 0; /* pid set to 0 -> addresses guest operating system */
            break;
        default:
            g_assert_not_reached();
        }
    }

    return true;
}
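
/*
 * Worked example (a sketch; the quadrant masks themselves are defined in
 * mmu-radix64.h): the quadrant is selected by the top two bits of the EA.
 * With MSR[HV] = 1, an access to 0xC000000000001000 falls in quadrant 3
 * and is translated with lpid = 0, pid = 0 (hypervisor kernel), while
 * 0x0000000000001000 falls in quadrant 0 and uses the current
 * SPR_BOOKS_PID (hypervisor process). With MSR[HV] = 0 the same two
 * quadrants address the guest OS and the guest application respectively,
 * and quadrants 1 and 2 are rejected above, which leads the caller to
 * raise a segment interrupt.
 */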

static void ppc_radix64_raise_segi(PowerPCCPU *cpu, MMUAccessType access_type,
                                   vaddr eaddr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    switch (access_type) {
    case MMU_INST_FETCH:
        /* Instruction Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_ISEG;
        break;
    case MMU_DATA_STORE:
    case MMU_DATA_LOAD:
        /* Data Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_DSEG;
        env->spr[SPR_DAR] = eaddr;
        break;
    default:
        g_assert_not_reached();
    }
    env->error_code = 0;
}

static inline const char *access_str(MMUAccessType access_type)
{
    return access_type == MMU_DATA_LOAD ? "reading" :
        (access_type == MMU_DATA_STORE ? "writing" : "execute");
}

static void ppc_radix64_raise_si(PowerPCCPU *cpu, MMUAccessType access_type,
                                 vaddr eaddr, uint32_t cause)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx" cause %08x\n",
                  __func__, access_str(access_type),
                  eaddr, cause);

    switch (access_type) {
    case MMU_INST_FETCH:
        /* Instruction Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = cause;
        break;
    case MMU_DATA_STORE:
        cause |= DSISR_ISSTORE;
        /* fall through */
    case MMU_DATA_LOAD:
        /* Data Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_DSI;
        env->spr[SPR_DSISR] = cause;
        env->spr[SPR_DAR] = eaddr;
        env->error_code = 0;
        break;
    default:
        g_assert_not_reached();
    }
}

static void ppc_radix64_raise_hsi(PowerPCCPU *cpu, MMUAccessType access_type,
                                  vaddr eaddr, hwaddr g_raddr, uint32_t cause)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx" 0x%"
                  HWADDR_PRIx" cause %08x\n",
                  __func__, access_str(access_type),
                  eaddr, g_raddr, cause);

    switch (access_type) {
    case MMU_INST_FETCH:
        /* H Instruction Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_HISI;
        env->spr[SPR_ASDR] = g_raddr;
        env->error_code = cause;
        break;
    case MMU_DATA_STORE:
        cause |= DSISR_ISSTORE;
        /* fall through */
    case MMU_DATA_LOAD:
        /* H Data Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_HDSI;
        env->spr[SPR_HDSISR] = cause;
        env->spr[SPR_HDAR] = eaddr;
        env->spr[SPR_ASDR] = g_raddr;
        env->error_code = 0;
        break;
    default:
        g_assert_not_reached();
    }
}

static bool ppc_radix64_check_prot(PowerPCCPU *cpu, MMUAccessType access_type,
                                   uint64_t pte, int *fault_cause, int *prot,
                                   int mmu_idx, bool partition_scoped)
{
    CPUPPCState *env = &cpu->env;
    int need_prot;

    /* Check Page Attributes (pte58:59) */
    if ((pte & R_PTE_ATT) == R_PTE_ATT_NI_IO && access_type == MMU_INST_FETCH) {
        /*
         * Radix PTE entries with the non-idempotent I/O attribute are treated
         * as guarded storage
         */
        *fault_cause |= SRR1_NOEXEC_GUARD;
        return true;
    }

    /* Determine permissions allowed by Encoded Access Authority */
    if (!partition_scoped && (pte & R_PTE_EAA_PRIV) &&
        FIELD_EX64(env->msr, MSR, PR)) {
        *prot = 0;
    } else if (mmuidx_pr(mmu_idx) || (pte & R_PTE_EAA_PRIV) ||
               partition_scoped) {
        *prot = ppc_radix64_get_prot_eaa(pte);
    } else { /* !MSR_PR && !(pte & R_PTE_EAA_PRIV) && !partition_scoped */
        *prot = ppc_radix64_get_prot_eaa(pte);
        *prot &= ppc_radix64_get_prot_amr(cpu); /* Least combined permissions */
    }

    /* Check if requested access type is allowed */
    need_prot = prot_for_access_type(access_type);
    if (need_prot & ~*prot) { /* Page Protected for that Access */
        *fault_cause |= access_type == MMU_INST_FETCH ? SRR1_NOEXEC_GUARD :
                                                        DSISR_PROTFAULT;
        return true;
    }

    return false;
}
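
/*
 * Example of the interaction above (illustrative values only): a
 * problem-state (MSR[PR] = 1) store that hits a process-scoped PTE with
 * R_PTE_EAA_PRIV set yields *prot = 0, so need_prot & ~*prot is nonzero
 * and the caller raises a DSI with DSISR_PROTFAULT; the same PTE reached
 * during an instruction fetch would report SRR1_NOEXEC_GUARD instead.
 */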

static void ppc_radix64_set_rc(PowerPCCPU *cpu, MMUAccessType access_type,
                               uint64_t pte, hwaddr pte_addr, int *prot)
{
    CPUState *cs = CPU(cpu);
    uint64_t npte;

    npte = pte | R_PTE_R; /* Always set reference bit */

    if (access_type == MMU_DATA_STORE) { /* Store/Write */
        npte |= R_PTE_C; /* Set change bit */
    } else {
        /*
         * Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit.
         */
        *prot &= ~PAGE_WRITE;
    }

    if (pte ^ npte) { /* If pte has changed then write it back */
        stq_phys(cs->as, pte_addr, npte);
    }
}

static bool ppc_radix64_is_valid_level(int level, int psize, uint64_t nls)
{
    bool ret;

    /*
     * Check if this is a valid level, according to POWER9 and POWER10
     * Processor User's Manuals, sections 4.10.4.1 and 5.10.6.1, respectively:
     * Supported Radix Tree Configurations and Resulting Page Sizes.
     *
     * Note: these checks are specific to POWER9 and POWER10 CPUs. Any future
     * CPUs that support a different Radix MMU configuration will need their
     * own implementation.
     */
    switch (level) {
    case 0:     /* Root Page Dir */
        ret = psize == 52 && nls == 13;
        break;
    case 1:
    case 2:
        ret = nls == 9;
        break;
    case 3:
        ret = nls == 9 || nls == 5;
        break;
    default:
        ret = false;
    }

    if (unlikely(!ret)) {
        qemu_log_mask(LOG_GUEST_ERROR, "invalid radix configuration: "
                      "level %d size %d nls %"PRIu64"\n",
                      level, psize, nls);
    }
    return ret;
}
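
/*
 * For reference, the configurations accepted above yield the four standard
 * POWER9/POWER10 radix page sizes. Starting from a 52-bit effective address
 * space, each level subtracts its nls from psize:
 *
 *   52 - 13 - 9 - 9 - 9 -> psize 12 -> 4KB pages
 *   52 - 13 - 9 - 9 - 5 -> psize 16 -> 64KB pages
 *   52 - 13 - 9 - 9     -> psize 21 -> 2MB pages (leaf at level 2)
 *   52 - 13 - 9         -> psize 30 -> 1GB pages (leaf at level 1)
 */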

static int ppc_radix64_next_level(AddressSpace *as, vaddr eaddr,
                                  uint64_t *pte_addr, uint64_t *nls,
                                  int *psize, uint64_t *pte, int *fault_cause)
{
    uint64_t index, mask, nlb, pde;

    /* Read page <directory/table> entry from guest address space */
    pde = ldq_phys(as, *pte_addr);
    if (!(pde & R_PTE_VALID)) {         /* Invalid Entry */
        *fault_cause |= DSISR_NOPTE;
        return 1;
    }

    *pte = pde;
    *psize -= *nls;
    if (!(pde & R_PTE_LEAF)) { /* Prepare for next iteration */
        *nls = pde & R_PDE_NLS;
        index = eaddr >> (*psize - *nls);       /* Shift */
        index &= ((1UL << *nls) - 1);           /* Mask */
        nlb = pde & R_PDE_NLB;
        mask = MAKE_64BIT_MASK(0, *nls + 3);

        if (nlb & mask) {
            qemu_log_mask(LOG_GUEST_ERROR,
                "%s: misaligned page dir/table base: 0x"TARGET_FMT_lx
                " page dir size: 0x"TARGET_FMT_lx"\n",
                __func__, nlb, mask + 1);
            nlb &= ~mask;
        }
        *pte_addr = nlb + index * sizeof(pde);
    }
    return 0;
}
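
/*
 * A directory entry (R_PTE_VALID set, R_PTE_LEAF clear) supplies the
 * next-level base (R_PDE_NLB) and size (R_PDE_NLS); a leaf entry instead
 * carries the real page number (R_PTE_RPN) plus the protection, attribute
 * and R/C bits consumed by the callers below.
 */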

static int ppc_radix64_walk_tree(AddressSpace *as, vaddr eaddr,
                                 uint64_t base_addr, uint64_t nls,
                                 hwaddr *raddr, int *psize, uint64_t *pte,
                                 int *fault_cause, hwaddr *pte_addr)
{
    uint64_t index, pde, rpn, mask;
    int level = 0;

    index = eaddr >> (*psize - nls);    /* Shift */
    index &= ((1UL << nls) - 1);        /* Mask */
    mask = MAKE_64BIT_MASK(0, nls + 3);

    if (base_addr & mask) {
        qemu_log_mask(LOG_GUEST_ERROR,
            "%s: misaligned page dir base: 0x"TARGET_FMT_lx
            " page dir size: 0x"TARGET_FMT_lx"\n",
            __func__, base_addr, mask + 1);
        base_addr &= ~mask;
    }
    *pte_addr = base_addr + index * sizeof(pde);

    do {
        int ret;

        if (!ppc_radix64_is_valid_level(level++, *psize, nls)) {
            *fault_cause |= DSISR_R_BADCONFIG;
            return 1;
        }

        ret = ppc_radix64_next_level(as, eaddr, pte_addr, &nls, psize, &pde,
                                     fault_cause);
        if (ret) {
            return ret;
        }
    } while (!(pde & R_PTE_LEAF));

    *pte = pde;
    rpn = pde & R_PTE_RPN;
    mask = (1UL << *psize) - 1;

    /* Or high bits of rpn and low bits of ea to form whole real addr */
    *raddr = (rpn & ~mask) | (eaddr & mask);
    return 0;
}

static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate)
{
    CPUPPCState *env = &cpu->env;

    if (!(pate->dw0 & PATE0_HR)) {
        return false;
    }
    if (lpid == 0 && !FIELD_EX64(env->msr, MSR, HV)) {
        return false;
    }
    if ((pate->dw0 & PATE1_R_PRTS) < 5) {
        return false;
    }
    /* More checks ... */
    return true;
}
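
/*
 * Note on the layout assumed here: dw0 of a Partition Table Entry holds
 * the host-radix flag (PATE0_HR) and the partition-scoped root (the
 * PRTBE_R_RPDB/RPDS masks are reused for it below), while dw1 holds the
 * process table base and size (PATE1_R_PRTB/PRTS). See mmu-book3s-v3.h
 * for the field definitions.
 */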

static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu,
                                              MMUAccessType access_type,
                                              vaddr eaddr, hwaddr g_raddr,
                                              ppc_v3_pate_t pate,
                                              hwaddr *h_raddr, int *h_prot,
                                              int *h_page_size, bool pde_addr,
                                              int mmu_idx, bool guest_visible)
{
    int fault_cause = 0;
    hwaddr pte_addr;
    uint64_t pte;

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
                  " mmu_idx %u 0x%"HWADDR_PRIx"\n",
                  __func__, access_str(access_type),
                  eaddr, mmu_idx, g_raddr);

    *h_page_size = PRTBE_R_GET_RTS(pate.dw0);
    /* No valid pte or access denied due to protection */
    if (ppc_radix64_walk_tree(CPU(cpu)->as, g_raddr, pate.dw0 & PRTBE_R_RPDB,
                              pate.dw0 & PRTBE_R_RPDS, h_raddr, h_page_size,
                              &pte, &fault_cause, &pte_addr) ||
        ppc_radix64_check_prot(cpu, access_type, pte,
                               &fault_cause, h_prot, mmu_idx, true)) {
        if (pde_addr) { /* address being translated was that of a guest pde */
            fault_cause |= DSISR_PRTABLE_FAULT;
        }
        if (guest_visible) {
            ppc_radix64_raise_hsi(cpu, access_type, eaddr, g_raddr, fault_cause);
        }
        return 1;
    }

    if (guest_visible) {
        ppc_radix64_set_rc(cpu, access_type, pte, pte_addr, h_prot);
    }

    return 0;
}

/*
 * The spapr vhc has a flat partition scope provided by qemu memory when
 * not nested.
 *
 * When running a nested guest, the addressing is 2-level radix on top of the
 * vhc memory, so it works practically identically to the bare metal 2-level
 * radix. So that code is selected directly. A cleaner and more flexible nested
 * hypervisor implementation would allow the vhc to provide a ->nested_xlate()
 * function but that is not required for the moment.
 */
static bool vhyp_flat_addressing(PowerPCCPU *cpu)
{
    if (cpu->vhyp) {
        return !vhyp_cpu_in_nested(cpu);
    }
    return false;
}

static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
                                            MMUAccessType access_type,
                                            vaddr eaddr, uint64_t pid,
                                            ppc_v3_pate_t pate, hwaddr *g_raddr,
                                            int *g_prot, int *g_page_size,
                                            int mmu_idx, bool guest_visible)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    uint64_t offset, size, prtb, prtbe_addr, prtbe0, base_addr, nls, index, pte;
    int fault_cause = 0, h_page_size, h_prot;
    hwaddr h_raddr, pte_addr;
    int ret;

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
                  " mmu_idx %u pid %"PRIu64"\n",
                  __func__, access_str(access_type),
                  eaddr, mmu_idx, pid);

    prtb = (pate.dw1 & PATE1_R_PRTB);
    size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
    if (prtb & (size - 1)) {
        /* Process Table not properly aligned */
        if (guest_visible) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_R_BADCONFIG);
        }
        return 1;
    }

    /* Index Process Table by PID to Find Corresponding Process Table Entry */
    offset = pid * sizeof(struct prtb_entry);
    if (offset >= size) {
        /* offset exceeds size of the process table */
        if (guest_visible) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_NOPTE);
        }
        return 1;
    }
    prtbe_addr = prtb + offset;

    if (vhyp_flat_addressing(cpu)) {
        prtbe0 = ldq_phys(cs->as, prtbe_addr);
    } else {
        /*
         * Process table addresses are subject to partition-scoped
         * translation
         *
         * On a Radix host, the partition-scoped page table for LPID=0
         * is only used to translate the effective addresses of the
         * process table entries.
         */
        ret = ppc_radix64_partition_scoped_xlate(cpu, 0, eaddr, prtbe_addr,
                                                 pate, &h_raddr, &h_prot,
                                                 &h_page_size, true,
            /* mmu_idx is 5 because we're translating from hypervisor scope */
                                                 5, guest_visible);
        if (ret) {
            return ret;
        }
        prtbe0 = ldq_phys(cs->as, h_raddr);
    }

    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
    *g_page_size = PRTBE_R_GET_RTS(prtbe0);
    base_addr = prtbe0 & PRTBE_R_RPDB;
    nls = prtbe0 & PRTBE_R_RPDS;
    if (FIELD_EX64(env->msr, MSR, HV) || vhyp_flat_addressing(cpu)) {
        /*
         * Can treat process table addresses as real addresses
         */
        ret = ppc_radix64_walk_tree(cs->as, eaddr & R_EADDR_MASK, base_addr,
                                    nls, g_raddr, g_page_size, &pte,
                                    &fault_cause, &pte_addr);
        if (ret) {
            /* No valid PTE */
            if (guest_visible) {
                ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
            }
            return ret;
        }
    } else {
        uint64_t rpn, mask;
        int level = 0;

        index = (eaddr & R_EADDR_MASK) >> (*g_page_size - nls); /* Shift */
        index &= ((1UL << nls) - 1);                            /* Mask */
        pte_addr = base_addr + (index * sizeof(pte));

        /*
         * Each process table address is subject to a partition-scoped
         * translation
         */
        do {
            ret = ppc_radix64_partition_scoped_xlate(cpu, 0, eaddr, pte_addr,
                                                     pate, &h_raddr, &h_prot,
                                                     &h_page_size, true,
            /* mmu_idx is 5 because we're translating from hypervisor scope */
                                                     5, guest_visible);
            if (ret) {
                return ret;
            }

            if (!ppc_radix64_is_valid_level(level++, *g_page_size, nls)) {
                fault_cause |= DSISR_R_BADCONFIG;
                ret = 1;
            } else {
                ret = ppc_radix64_next_level(cs->as, eaddr & R_EADDR_MASK,
                                             &h_raddr, &nls, g_page_size,
                                             &pte, &fault_cause);
            }

            if (ret) {
                /* No valid pte */
                if (guest_visible) {
                    ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
                }
                return ret;
            }
            pte_addr = h_raddr;
        } while (!(pte & R_PTE_LEAF));

        rpn = pte & R_PTE_RPN;
        mask = (1UL << *g_page_size) - 1;

        /* Or high bits of rpn and low bits of ea to form whole real addr */
        *g_raddr = (rpn & ~mask) | (eaddr & mask);
    }

    if (ppc_radix64_check_prot(cpu, access_type, pte, &fault_cause,
                               g_prot, mmu_idx, false)) {
        /* Access denied due to protection */
        if (guest_visible) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
        }
        return 1;
    }

    if (guest_visible) {
        ppc_radix64_set_rc(cpu, access_type, pte, pte_addr, g_prot);
    }

    return 0;
}

/*
 * Radix tree translation is a two-step translation process:
 *
 * 1. Process-scoped translation:   Guest Eff Addr  -> Guest Real Addr
 * 2. Partition-scoped translation: Guest Real Addr -> Host Real Addr
 *
 *                                  MSR[HV]
 *              +-------------+----------------+---------------+
 *              |             |     HV = 0     |     HV = 1    |
 *              +-------------+----------------+---------------+
 *              | Relocation  |    Partition   |      No       |
 *              | = Off       |     Scoped     |  Translation  |
 *  Relocation  +-------------+----------------+---------------+
 *              | Relocation  |   Partition &  |    Process    |
 *              | = On        | Process Scoped |    Scoped     |
 *              +-------------+----------------+---------------+
 */
static bool ppc_radix64_xlate_impl(PowerPCCPU *cpu, vaddr eaddr,
                                   MMUAccessType access_type, hwaddr *raddr,
                                   int *psizep, int *protp, int mmu_idx,
                                   bool guest_visible)
{
    CPUPPCState *env = &cpu->env;
    uint64_t lpid, pid;
    ppc_v3_pate_t pate;
    int psize, prot;
    hwaddr g_raddr;
    bool relocation;

    assert(!(mmuidx_hv(mmu_idx) && cpu->vhyp));

    relocation = !mmuidx_real(mmu_idx);

    /* HV or virtual hypervisor Real Mode Access */
    if (!relocation && (mmuidx_hv(mmu_idx) || vhyp_flat_addressing(cpu))) {
        /* In real mode top 4 effective addr bits (mostly) ignored */
        *raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        /* In HV mode, add HRMOR if top EA bit is clear */
        if (mmuidx_hv(mmu_idx) || !env->has_hv_mode) {
            if (!(eaddr >> 63)) {
                *raddr |= env->spr[SPR_HRMOR];
            }
        }
        *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *psizep = TARGET_PAGE_BITS;
        return true;
    }

    /*
     * Check UPRT (we avoid the check in real mode to deal with
     * transitional states during kexec).
     */
    if (guest_visible && !ppc64_use_proc_tbl(cpu)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "LPCR:UPRT not set in radix mode ! LPCR="
                      TARGET_FMT_lx "\n", env->spr[SPR_LPCR]);
    }

    /* Virtual Mode Access - get the fully qualified address */
    if (!ppc_radix64_get_fully_qualified_addr(&cpu->env, eaddr, &lpid, &pid)) {
        if (guest_visible) {
            ppc_radix64_raise_segi(cpu, access_type, eaddr);
        }
        return false;
    }

    /* Get Partition Table */
    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc;
        vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        if (!vhc->get_pate(cpu->vhyp, cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
                                      DSISR_R_BADCONFIG);
            }
            return false;
        }
    } else {
        if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
                                      DSISR_R_BADCONFIG);
            }
            return false;
        }
        if (!validate_pate(cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
                                      DSISR_R_BADCONFIG);
            }
            return false;
        }
    }

    *psizep = INT_MAX;
    *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /*
     * Perform process-scoped translation if relocation enabled.
     *
     * - Translates an effective address to a host real address in
     *   quadrants 0 and 3 when HV=1.
     *
     * - Translates an effective address to a guest real address.
     */
    if (relocation) {
        int ret = ppc_radix64_process_scoped_xlate(cpu, access_type, eaddr, pid,
                                                   pate, &g_raddr, &prot,
                                                   &psize, mmu_idx, guest_visible);
        if (ret) {
            return false;
        }
        *psizep = MIN(*psizep, psize);
        *protp &= prot;
    } else {
        g_raddr = eaddr & R_EADDR_MASK;
    }

    if (vhyp_flat_addressing(cpu)) {
        *raddr = g_raddr;
    } else {
        /*
         * Perform partition-scoped translation if !HV or HV access to
         * quadrants 1 or 2. Translates a guest real address to a host
         * real address.
         */
        if (lpid || !mmuidx_hv(mmu_idx)) {
            int ret;

            ret = ppc_radix64_partition_scoped_xlate(cpu, access_type, eaddr,
                                                     g_raddr, pate, raddr,
                                                     &prot, &psize, false,
                                                     mmu_idx, guest_visible);
            if (ret) {
                return false;
            }
            *psizep = MIN(*psizep, psize);
            *protp &= prot;
        } else {
            *raddr = g_raddr;
        }
    }

    return true;
}
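
/*
 * Putting the table above together, a concrete (illustrative) flow for a
 * guest data load with MSR[HV] = 0 and relocation on: the EA is first
 * resolved through the guest's process-scoped tree into g_raddr, then
 * g_raddr is pushed through the partition-scoped tree for the current
 * LPID to obtain the host real address, with *psizep and *protp narrowed
 * to the most restrictive value seen on either pass.
 */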

bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
                       hwaddr *raddrp, int *psizep, int *protp, int mmu_idx,
                       bool guest_visible)
{
    bool ret = ppc_radix64_xlate_impl(cpu, eaddr, access_type, raddrp,
                                      psizep, protp, mmu_idx, guest_visible);

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
                  " mmu_idx %u (prot %c%c%c) -> 0x%"HWADDR_PRIx"\n",
                  __func__, access_str(access_type),
                  eaddr, mmu_idx,
                  *protp & PAGE_READ ? 'r' : '-',
                  *protp & PAGE_WRITE ? 'w' : '-',
                  *protp & PAGE_EXEC ? 'x' : '-',
                  *raddrp);

    return ret;
}
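
/*
 * A minimal caller sketch (assumed context: in the QEMU tree the real
 * entry point is ppc_xlate() in mmu_helper.c, which dispatches here for
 * radix MMUs; the snippet below is illustrative only):
 *
 *     hwaddr raddr;
 *     int psize, prot;
 *
 *     if (ppc_radix64_xlate(cpu, eaddr, MMU_DATA_LOAD, &raddr,
 *                           &psize, &prot, mmu_idx, true)) {
 *         // translation succeeded; raddr/psize/prot are valid
 *     } else {
 *         // with guest_visible = true, an interrupt has been raised
 *         // via cs->exception_index and the relevant SPRs
 *     }
 */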