qemu/target/ppc/mmu-radix64.c
/*
 *  PowerPC Radix MMU emulation helpers for QEMU.
 *
 *  Copyright (c) 2016 Suraj Jitindar Singh, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "exec/log.h"
#include "internal.h"
#include "mmu-radix64.h"
#include "mmu-book3s-v3.h"

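/*
 * A radix effective address is fully qualified by the (LPID, PID) pair
 * used to translate it; EA(0:1) select one of four quadrants:
 *
 *   Quadrant 0 (EA = 0x0...): application space, PID from PIDR
 *   Quadrant 1 (EA = 0x4...): HV only, a guest's application space
 *   Quadrant 2 (EA = 0x8...): HV only, a guest's kernel space
 *   Quadrant 3 (EA = 0xc...): kernel space, PID 0
 *
 * Quadrants 1 and 2 are illegal when MSR[HV] = 0 and make this function
 * fail, which the caller turns into a segment interrupt.
 */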
static bool ppc_radix64_get_fully_qualified_addr(const CPUPPCState *env,
                                                 vaddr eaddr,
                                                 uint64_t *lpid, uint64_t *pid)
{
    /* When EA(2:11) are nonzero, raise a segment interrupt */
    if (eaddr & ~R_EADDR_VALID_MASK) {
        return false;
    }

    if (FIELD_EX64(env->msr, MSR, HV)) { /* MSR[HV] -> Hypervisor/bare metal */
        switch (eaddr & R_EADDR_QUADRANT) {
        case R_EADDR_QUADRANT0:
            *lpid = 0;
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT1:
            *lpid = env->spr[SPR_LPIDR];
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT2:
            *lpid = env->spr[SPR_LPIDR];
            *pid = 0;
            break;
        case R_EADDR_QUADRANT3:
            *lpid = 0;
            *pid = 0;
            break;
        default:
            g_assert_not_reached();
        }
    } else {  /* !MSR[HV] -> Guest */
        switch (eaddr & R_EADDR_QUADRANT) {
        case R_EADDR_QUADRANT0: /* Guest application */
            *lpid = env->spr[SPR_LPIDR];
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT1: /* Illegal */
        case R_EADDR_QUADRANT2:
            return false;
        case R_EADDR_QUADRANT3: /* Guest OS */
            *lpid = env->spr[SPR_LPIDR];
            *pid = 0; /* pid set to 0 -> addresses guest operating system */
            break;
        default:
            g_assert_not_reached();
        }
    }

    return true;
}

static void ppc_radix64_raise_segi(PowerPCCPU *cpu, MMUAccessType access_type,
                                   vaddr eaddr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    switch (access_type) {
    case MMU_INST_FETCH:
        /* Instruction Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_ISEG;
        break;
    case MMU_DATA_STORE:
    case MMU_DATA_LOAD:
        /* Data Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_DSEG;
        env->spr[SPR_DAR] = eaddr;
        break;
    default:
        g_assert_not_reached();
    }
    env->error_code = 0;
}

static inline const char *access_str(MMUAccessType access_type)
{
    return access_type == MMU_DATA_LOAD ? "reading" :
        (access_type == MMU_DATA_STORE ? "writing" : "execute");
}

static void ppc_radix64_raise_si(PowerPCCPU *cpu, MMUAccessType access_type,
                                 vaddr eaddr, uint32_t cause)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx" cause %08x\n",
                  __func__, access_str(access_type),
                  eaddr, cause);

    switch (access_type) {
    case MMU_INST_FETCH:
        /* Instruction Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = cause;
        break;
    case MMU_DATA_STORE:
        cause |= DSISR_ISSTORE;
        /* fall through */
    case MMU_DATA_LOAD:
        /* Data Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_DSI;
        env->spr[SPR_DSISR] = cause;
        env->spr[SPR_DAR] = eaddr;
        env->error_code = 0;
        break;
    default:
        g_assert_not_reached();
    }
}

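/*
 * Partition-scoped faults are delivered to the hypervisor as HISI/HDSI
 * rather than ISI/DSI, and additionally report the guest real address
 * under translation in the ASDR.
 */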
static void ppc_radix64_raise_hsi(PowerPCCPU *cpu, MMUAccessType access_type,
                                  vaddr eaddr, hwaddr g_raddr, uint32_t cause)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx" 0x%"
                  HWADDR_PRIx" cause %08x\n",
                  __func__, access_str(access_type),
                  eaddr, g_raddr, cause);

    switch (access_type) {
    case MMU_INST_FETCH:
        /* H Instruction Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_HISI;
        env->spr[SPR_ASDR] = g_raddr;
        env->error_code = cause;
        break;
    case MMU_DATA_STORE:
        cause |= DSISR_ISSTORE;
        /* fall through */
    case MMU_DATA_LOAD:
        /* H Data Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_HDSI;
        env->spr[SPR_HDSISR] = cause;
        env->spr[SPR_HDAR] = eaddr;
        env->spr[SPR_ASDR] = g_raddr;
        env->error_code = 0;
        break;
    default:
        g_assert_not_reached();
    }
}

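/*
 * Process-scoped authority: a problem-state (MSR[PR]) access to a page
 * whose EAA marks it privileged gets no access at all, and a privileged
 * access to a non-privileged page is further restricted by the AMR.
 * Partition-scoped PTEs use the EAA bits alone.
 */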
static bool ppc_radix64_check_prot(PowerPCCPU *cpu, MMUAccessType access_type,
                                   uint64_t pte, int *fault_cause, int *prot,
                                   int mmu_idx, bool partition_scoped)
{
    CPUPPCState *env = &cpu->env;
    int need_prot;

    /* Check Page Attributes (pte58:59) */
    if ((pte & R_PTE_ATT) == R_PTE_ATT_NI_IO && access_type == MMU_INST_FETCH) {
        /*
         * Radix PTE entries with the non-idempotent I/O attribute are treated
         * as guarded storage
         */
        *fault_cause |= SRR1_NOEXEC_GUARD;
        return true;
    }

    /* Determine permissions allowed by Encoded Access Authority */
    if (!partition_scoped && (pte & R_PTE_EAA_PRIV) &&
        FIELD_EX64(env->msr, MSR, PR)) {
        *prot = 0;
    } else if (mmuidx_pr(mmu_idx) || (pte & R_PTE_EAA_PRIV) ||
               partition_scoped) {
        *prot = ppc_radix64_get_prot_eaa(pte);
    } else { /* !MSR_PR && !(pte & R_PTE_EAA_PRIV) && !partition_scoped */
        *prot = ppc_radix64_get_prot_eaa(pte);
        *prot &= ppc_radix64_get_prot_amr(cpu); /* Least combined permissions */
    }

    /* Check if requested access type is allowed */
    need_prot = prot_for_access_type(access_type);
    if (need_prot & ~*prot) { /* Page Protected for that Access */
        *fault_cause |= access_type == MMU_INST_FETCH ? SRR1_NOEXEC_GUARD :
                                                        DSISR_PROTFAULT;
        return true;
    }

    return false;
}

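/*
 * Reference and Change bits are updated in the in-memory PTE itself, so
 * the guest's page tables observe R/C updates made on its behalf here.
 */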
static void ppc_radix64_set_rc(PowerPCCPU *cpu, MMUAccessType access_type,
                               uint64_t pte, hwaddr pte_addr, int *prot)
{
    CPUState *cs = CPU(cpu);
    uint64_t npte;

    npte = pte | R_PTE_R; /* Always set reference bit */

    if (access_type == MMU_DATA_STORE) { /* Store/Write */
        npte |= R_PTE_C; /* Set change bit */
    } else {
        /*
         * Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit.
         */
        *prot &= ~PAGE_WRITE;
    }

    if (pte ^ npte) { /* If pte has changed then write it back */
        stq_phys(cs->as, pte_addr, npte);
    }
}

static bool ppc_radix64_is_valid_level(int level, int psize, uint64_t nls)
{
    /*
     * Check if this is a valid level, according to POWER9 and POWER10
     * Processor User's Manuals, sections 4.10.4.1 and 5.10.6.1, respectively:
     * Supported Radix Tree Configurations and Resulting Page Sizes.
     *
     * Note: these checks are specific to POWER9 and POWER10 CPUs. Any future
     * CPUs that support a different Radix MMU configuration will need their
     * own implementation.
     */
    switch (level) {
    case 0:     /* Root Page Dir */
        return psize == 52 && nls == 13;
    case 1:
    case 2:
        return nls == 9;
    case 3:
        return nls == 9 || nls == 5;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "invalid radix level: %d\n", level);
        return false;
    }
}
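
/*
 * With the configurations accepted above, translation always resolves
 * 52 EA bits (RTS): nls values of 13 + 9 + 9 + 9 leave a 12-bit page
 * offset (4K pages) and a level-3 nls of 5 leaves a 16-bit offset
 * (64K pages), while leaf PTEs at levels 2 and 1 map 2M and 1G pages
 * respectively.
 */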
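/*
 * Note that a misaligned next-level base (low NLB bits overlapping the
 * index) is logged as a guest error and the offending bits are masked
 * off rather than raising a fault.
 */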
static int ppc_radix64_next_level(AddressSpace *as, vaddr eaddr,
                                  uint64_t *pte_addr, uint64_t *nls,
                                  int *psize, uint64_t *pte, int *fault_cause)
{
    uint64_t index, mask, nlb, pde;

    /* Read page <directory/table> entry from guest address space */
    pde = ldq_phys(as, *pte_addr);
    if (!(pde & R_PTE_VALID)) {         /* Invalid Entry */
        *fault_cause |= DSISR_NOPTE;
        return 1;
    }

    *pte = pde;
    *psize -= *nls;
    if (!(pde & R_PTE_LEAF)) { /* Prepare for next iteration */
        *nls = pde & R_PDE_NLS;
        index = eaddr >> (*psize - *nls);       /* Shift */
        index &= ((1UL << *nls) - 1);           /* Mask */
        nlb = pde & R_PDE_NLB;
        mask = MAKE_64BIT_MASK(0, *nls + 3);

        if (nlb & mask) {
            qemu_log_mask(LOG_GUEST_ERROR,
                "%s: misaligned page dir/table base: 0x"TARGET_FMT_lx
                " page dir size: 0x"TARGET_FMT_lx"\n",
                __func__, nlb, mask + 1);
            nlb &= ~mask;
        }
        *pte_addr = nlb + index * sizeof(pde);
    }
    return 0;
}

static int ppc_radix64_walk_tree(AddressSpace *as, vaddr eaddr,
                                 uint64_t base_addr, uint64_t nls,
                                 hwaddr *raddr, int *psize, uint64_t *pte,
                                 int *fault_cause, hwaddr *pte_addr)
{
    uint64_t index, pde, rpn, mask;
    int level = 0;

    index = eaddr >> (*psize - nls);    /* Shift */
    index &= ((1UL << nls) - 1);        /* Mask */
    mask = MAKE_64BIT_MASK(0, nls + 3);

    if (base_addr & mask) {
        qemu_log_mask(LOG_GUEST_ERROR,
            "%s: misaligned page dir base: 0x"TARGET_FMT_lx
            " page dir size: 0x"TARGET_FMT_lx"\n",
            __func__, base_addr, mask + 1);
        base_addr &= ~mask;
    }
    *pte_addr = base_addr + index * sizeof(pde);

    do {
        int ret;

        if (!ppc_radix64_is_valid_level(level++, *psize, nls)) {
            *fault_cause |= DSISR_R_BADCONFIG;
            return 1;
        }

        ret = ppc_radix64_next_level(as, eaddr, pte_addr, &nls, psize, &pde,
                                     fault_cause);
        if (ret) {
            return ret;
        }
    } while (!(pde & R_PTE_LEAF));

    *pte = pde;
    rpn = pde & R_PTE_RPN;
    mask = (1UL << *psize) - 1;

    /* Or high bits of rpn and low bits of ea to form whole real addr */
    *raddr = (rpn & ~mask) | (eaddr & mask);
    return 0;
}

static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate)
{
    CPUPPCState *env = &cpu->env;

    if (!(pate->dw0 & PATE0_HR)) {
        return false;
    }
    if (lpid == 0 && !FIELD_EX64(env->msr, MSR, HV)) {
        return false;
    }
    if ((pate->dw0 & PATE1_R_PRTS) < 5) {
        return false;
    }
    /* More checks ... */
    return true;
}
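
/*
 * As consumed here, PATE dw0 carries the HR (host radix) bit and the
 * partition-scoped tree root (RTS/RPDB/RPDS), while dw1 carries the
 * process table base and size (PRTB/PRTS).
 */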

static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu,
                                              MMUAccessType access_type,
                                              vaddr eaddr, hwaddr g_raddr,
                                              ppc_v3_pate_t pate,
                                              hwaddr *h_raddr, int *h_prot,
                                              int *h_page_size, bool pde_addr,
                                              int mmu_idx, bool guest_visible)
{
    int fault_cause = 0;
    hwaddr pte_addr;
    uint64_t pte;

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
                  " mmu_idx %u 0x%"HWADDR_PRIx"\n",
                  __func__, access_str(access_type),
                  eaddr, mmu_idx, g_raddr);

    *h_page_size = PRTBE_R_GET_RTS(pate.dw0);
    /* No valid pte or access denied due to protection */
    if (ppc_radix64_walk_tree(CPU(cpu)->as, g_raddr, pate.dw0 & PRTBE_R_RPDB,
                              pate.dw0 & PRTBE_R_RPDS, h_raddr, h_page_size,
                              &pte, &fault_cause, &pte_addr) ||
        ppc_radix64_check_prot(cpu, access_type, pte,
                               &fault_cause, h_prot, mmu_idx, true)) {
        if (pde_addr) { /* address being translated was that of a guest pde */
            fault_cause |= DSISR_PRTABLE_FAULT;
        }
        if (guest_visible) {
            ppc_radix64_raise_hsi(cpu, access_type, eaddr, g_raddr, fault_cause);
        }
        return 1;
    }

    if (guest_visible) {
        ppc_radix64_set_rc(cpu, access_type, pte, pte_addr, h_prot);
    }

    return 0;
}

/*
 * The spapr vhc has a flat partition scope provided by QEMU memory when
 * not nested.
 *
 * When running a nested guest, the addressing is 2-level radix on top of the
 * vhc memory, so it works practically identically to the bare metal 2-level
 * radix. So that code is selected directly. A cleaner and more flexible nested
 * hypervisor implementation would allow the vhc to provide a ->nested_xlate()
 * function but that is not required for the moment.
 */
static bool vhyp_flat_addressing(PowerPCCPU *cpu)
{
    if (cpu->vhyp) {
        return !vhyp_cpu_in_nested(cpu);
    }
    return false;
}

static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
                                            MMUAccessType access_type,
                                            vaddr eaddr, uint64_t pid,
                                            ppc_v3_pate_t pate, hwaddr *g_raddr,
                                            int *g_prot, int *g_page_size,
                                            int mmu_idx, bool guest_visible)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    uint64_t offset, size, prtb, prtbe_addr, prtbe0, base_addr, nls, index, pte;
    int fault_cause = 0, h_page_size, h_prot;
    hwaddr h_raddr, pte_addr;
    int ret;

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
                  " mmu_idx %u pid %"PRIu64"\n",
                  __func__, access_str(access_type),
                  eaddr, mmu_idx, pid);

    prtb = (pate.dw1 & PATE1_R_PRTB);
    size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
    if (prtb & (size - 1)) {
        /* Process Table not properly aligned */
        if (guest_visible) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_R_BADCONFIG);
        }
        return 1;
    }

    /* Index Process Table by PID to Find Corresponding Process Table Entry */
    offset = pid * sizeof(struct prtb_entry);
    if (offset >= size) {
        /* offset exceeds size of the process table */
        if (guest_visible) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_NOPTE);
        }
        return 1;
    }
    prtbe_addr = prtb + offset;

    if (vhyp_flat_addressing(cpu)) {
        prtbe0 = ldq_phys(cs->as, prtbe_addr);
    } else {
        /*
         * Process table addresses are subject to partition-scoped
         * translation
         *
         * On a Radix host, the partition-scoped page table for LPID=0
         * is only used to translate the effective addresses of the
         * process table entries.
         */
        ret = ppc_radix64_partition_scoped_xlate(cpu, 0, eaddr, prtbe_addr,
                                                 pate, &h_raddr, &h_prot,
                                                 &h_page_size, true,
            /* mmu_idx is 5 because we're translating from hypervisor scope */
                                                 5, guest_visible);
        if (ret) {
            return ret;
        }
        prtbe0 = ldq_phys(cs->as, h_raddr);
    }

    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
    *g_page_size = PRTBE_R_GET_RTS(prtbe0);
    base_addr = prtbe0 & PRTBE_R_RPDB;
    nls = prtbe0 & PRTBE_R_RPDS;
    if (FIELD_EX64(env->msr, MSR, HV) || vhyp_flat_addressing(cpu)) {
        /*
         * Can treat process table addresses as real addresses
         */
        ret = ppc_radix64_walk_tree(cs->as, eaddr & R_EADDR_MASK, base_addr,
                                    nls, g_raddr, g_page_size, &pte,
                                    &fault_cause, &pte_addr);
        if (ret) {
            /* No valid PTE */
            if (guest_visible) {
                ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
            }
            return ret;
        }
    } else {
        uint64_t rpn, mask;
        int level = 0;

        index = (eaddr & R_EADDR_MASK) >> (*g_page_size - nls); /* Shift */
        index &= ((1UL << nls) - 1);                            /* Mask */
        pte_addr = base_addr + (index * sizeof(pte));

        /*
         * Each process table address is subject to a partition-scoped
         * translation
         */
        do {
            ret = ppc_radix64_partition_scoped_xlate(cpu, 0, eaddr, pte_addr,
                                                     pate, &h_raddr, &h_prot,
                                                     &h_page_size, true,
            /* mmu_idx is 5 because we're translating from hypervisor scope */
                                                     5, guest_visible);
            if (ret) {
                return ret;
            }

            if (!ppc_radix64_is_valid_level(level++, *g_page_size, nls)) {
                fault_cause |= DSISR_R_BADCONFIG;
                ret = 1;
            } else {
                ret = ppc_radix64_next_level(cs->as, eaddr & R_EADDR_MASK,
                                             &h_raddr, &nls, g_page_size,
                                             &pte, &fault_cause);
            }
            if (ret) {
                /* No valid pte or bad radix configuration */
                if (guest_visible) {
                    ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
                }
                return ret;
            }
            pte_addr = h_raddr;
        } while (!(pte & R_PTE_LEAF));

        rpn = pte & R_PTE_RPN;
        mask = (1UL << *g_page_size) - 1;

        /* Or high bits of rpn and low bits of ea to form whole real addr */
        *g_raddr = (rpn & ~mask) | (eaddr & mask);
    }

    if (ppc_radix64_check_prot(cpu, access_type, pte, &fault_cause,
                               g_prot, mmu_idx, false)) {
        /* Access denied due to protection */
        if (guest_visible) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
        }
        return 1;
    }

    if (guest_visible) {
        ppc_radix64_set_rc(cpu, access_type, pte, pte_addr, g_prot);
    }

    return 0;
}

/*
 * Radix tree translation is a two-step translation process:
 *
 * 1. Process-scoped translation:   Guest Eff Addr  -> Guest Real Addr
 * 2. Partition-scoped translation: Guest Real Addr -> Host Real Addr
 *
 *                                  MSR[HV]
 *              +-------------+----------------+---------------+
 *              |             |     HV = 0     |     HV = 1    |
 *              +-------------+----------------+---------------+
 *              | Relocation  |    Partition   |      No       |
 *              | = Off       |     Scoped     |  Translation  |
 *  Relocation  +-------------+----------------+---------------+
 *              | Relocation  |   Partition &  |    Process    |
 *              | = On        | Process Scoped |    Scoped     |
 *              +-------------+----------------+---------------+
 */
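
/*
 * In the implementation below, relocation is off when mmuidx_real();
 * the process-scoped step runs only with relocation on, and the
 * partition-scoped step runs when !HV or when HV software addresses a
 * guest (lpid != 0).
 */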
static bool ppc_radix64_xlate_impl(PowerPCCPU *cpu, vaddr eaddr,
                                   MMUAccessType access_type, hwaddr *raddr,
                                   int *psizep, int *protp, int mmu_idx,
                                   bool guest_visible)
{
    CPUPPCState *env = &cpu->env;
    uint64_t lpid, pid;
    ppc_v3_pate_t pate;
    int psize, prot;
    hwaddr g_raddr;
    bool relocation;

    assert(!(mmuidx_hv(mmu_idx) && cpu->vhyp));

    relocation = !mmuidx_real(mmu_idx);

    /* HV or virtual hypervisor Real Mode Access */
    if (!relocation && (mmuidx_hv(mmu_idx) || vhyp_flat_addressing(cpu))) {
        /* In real mode top 4 effective addr bits (mostly) ignored */
        *raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        /* In HV mode, add HRMOR if top EA bit is clear */
        if (mmuidx_hv(mmu_idx) || !env->has_hv_mode) {
            if (!(eaddr >> 63)) {
                *raddr |= env->spr[SPR_HRMOR];
            }
        }
        *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *psizep = TARGET_PAGE_BITS;
        return true;
    }

    /*
     * Check UPRT (we avoid the check in real mode to deal with
     * transitional states during kexec).
     */
    if (guest_visible && !ppc64_use_proc_tbl(cpu)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "LPCR:UPRT not set in radix mode ! LPCR="
                      TARGET_FMT_lx "\n", env->spr[SPR_LPCR]);
    }

    /* Virtual Mode Access - get the fully qualified address */
    if (!ppc_radix64_get_fully_qualified_addr(&cpu->env, eaddr, &lpid, &pid)) {
        if (guest_visible) {
            ppc_radix64_raise_segi(cpu, access_type, eaddr);
        }
        return false;
    }

    /* Get Partition Table */
    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc;
        vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        if (!vhc->get_pate(cpu->vhyp, cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
                                      DSISR_R_BADCONFIG);
            }
            return false;
        }
    } else {
        if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
                                      DSISR_R_BADCONFIG);
            }
            return false;
        }
        if (!validate_pate(cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
                                      DSISR_R_BADCONFIG);
            }
            return false;
        }
    }

    *psizep = INT_MAX;
    *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /*
     * Perform process-scoped translation if relocation enabled.
     *
     * - Translates an effective address to a host real address in
     *   quadrants 0 and 3 when HV=1.
     *
     * - Translates an effective address to a guest real address.
     */
    if (relocation) {
        int ret = ppc_radix64_process_scoped_xlate(cpu, access_type, eaddr, pid,
                                                   pate, &g_raddr, &prot,
                                                   &psize, mmu_idx, guest_visible);
        if (ret) {
            return false;
        }
        *psizep = MIN(*psizep, psize);
        *protp &= prot;
    } else {
        g_raddr = eaddr & R_EADDR_MASK;
    }

    if (vhyp_flat_addressing(cpu)) {
        *raddr = g_raddr;
    } else {
        /*
         * Perform partition-scoped translation if !HV or HV access to
         * quadrants 1 or 2. Translates a guest real address to a host
         * real address.
         */
        if (lpid || !mmuidx_hv(mmu_idx)) {
            int ret;

            ret = ppc_radix64_partition_scoped_xlate(cpu, access_type, eaddr,
                                                     g_raddr, pate, raddr,
                                                     &prot, &psize, false,
                                                     mmu_idx, guest_visible);
            if (ret) {
                return false;
            }
            *psizep = MIN(*psizep, psize);
            *protp &= prot;
        } else {
            *raddr = g_raddr;
        }
    }

    return true;
}

bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
                       hwaddr *raddrp, int *psizep, int *protp, int mmu_idx,
                       bool guest_visible)
{
    bool ret = ppc_radix64_xlate_impl(cpu, eaddr, access_type, raddrp,
                                      psizep, protp, mmu_idx, guest_visible);

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
                  " mmu_idx %u (prot %c%c%c) -> 0x%"HWADDR_PRIx"\n",
                  __func__, access_str(access_type),
                  eaddr, mmu_idx,
                  *protp & PAGE_READ ? 'r' : '-',
                  *protp & PAGE_WRITE ? 'w' : '-',
                  *protp & PAGE_EXEC ? 'x' : '-',
                  *raddrp);

    return ret;
}