qemu/target/ppc/mmu-radix64.c
/*
 *  PowerPC Radix MMU emulation helpers for QEMU.
 *
 *  Copyright (c) 2016 Suraj Jitindar Singh, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "exec/log.h"
#include "mmu-radix64.h"
#include "mmu-book3s-v3.h"
  30
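/*
 * The quadrant is selected by the two most-significant bits of the
 * effective address (the R_EADDR_QUADRANT field): quadrant 0 is the
 * current process's space and quadrant 3 the operating system's space,
 * while quadrants 1 and 2 are reachable only from hypervisor state,
 * where they address a guest process and a guest OS respectively.  The
 * (LPID, PID) pair returned here fully qualifies the EA for the
 * partition and process table lookups that follow.
 */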
static bool ppc_radix64_get_fully_qualified_addr(CPUPPCState *env, vaddr eaddr,
                                                 uint64_t *lpid, uint64_t *pid)
{
    if (msr_hv) { /* MSR[HV] -> Hypervisor/bare metal */
        switch (eaddr & R_EADDR_QUADRANT) {
        case R_EADDR_QUADRANT0:
            *lpid = 0;
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT1:
            *lpid = env->spr[SPR_LPIDR];
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT2:
            *lpid = env->spr[SPR_LPIDR];
            *pid = 0;
            break;
        case R_EADDR_QUADRANT3:
            *lpid = 0;
            *pid = 0;
            break;
        }
    } else {  /* !MSR[HV] -> Guest */
        switch (eaddr & R_EADDR_QUADRANT) {
        case R_EADDR_QUADRANT0: /* Guest application */
            *lpid = env->spr[SPR_LPIDR];
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT1: /* Illegal */
        case R_EADDR_QUADRANT2:
            return false;
        case R_EADDR_QUADRANT3: /* Guest OS */
            *lpid = env->spr[SPR_LPIDR];
            *pid = 0; /* pid set to 0 -> addresses guest operating system */
            break;
        }
    }

    return true;
}
  71
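/*
 * Throughout this file the access type follows the convention used by the
 * rest of the ppc MMU code: rwx == 0 for a data load, rwx == 1 for a data
 * store and rwx == 2 for an instruction fetch.  Segment interrupts report
 * a quadrant that is not accessible in the current state; storage
 * interrupts report translation or protection failures found during the
 * walk, with the cause delivered via SRR1 (ISI) or DSISR (DSI).
 */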
static void ppc_radix64_raise_segi(PowerPCCPU *cpu, int rwx, vaddr eaddr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    if (rwx == 2) { /* Instruction Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_ISEG;
    } else { /* Data Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_DSEG;
        env->spr[SPR_DAR] = eaddr;
    }
    env->error_code = 0;
}

static void ppc_radix64_raise_si(PowerPCCPU *cpu, int rwx, vaddr eaddr,
                                 uint32_t cause)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    if (rwx == 2) { /* Instruction Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = cause;
    } else { /* Data Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_DSI;
        if (rwx == 1) { /* Write -> Store */
            cause |= DSISR_ISSTORE;
        }
        env->spr[SPR_DSISR] = cause;
        env->spr[SPR_DAR] = eaddr;
        env->error_code = 0;
    }
}
 105
 106
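/*
 * Access authority checks, in the order they are applied:
 *   - a PTE whose attributes mark it non-idempotent I/O is treated as
 *     guarded storage, so instruction fetch from it is refused;
 *   - EAA 0 ("privileged") makes the page inaccessible from problem state;
 *   - otherwise the EAA bits give the read/write/execute permissions, and
 *     supervisor accesses to non-privileged pages are further restricted
 *     by the Authority Mask Register.
 * On failure the matching DSISR/SRR1 cause bit is accumulated into
 * *fault_cause and true is returned.
 */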
static bool ppc_radix64_check_prot(PowerPCCPU *cpu, int rwx, uint64_t pte,
                                   int *fault_cause, int *prot)
{
    CPUPPCState *env = &cpu->env;
    const int need_prot[] = { PAGE_READ, PAGE_WRITE, PAGE_EXEC };

    /* Check Page Attributes (pte58:59) */
    if (((pte & R_PTE_ATT) == R_PTE_ATT_NI_IO) && (rwx == 2)) {
        /*
         * Radix PTE entries with the non-idempotent I/O attribute are treated
         * as guarded storage
         */
        *fault_cause |= SRR1_NOEXEC_GUARD;
        return true;
    }

    /* Determine permissions allowed by Encoded Access Authority */
    if ((pte & R_PTE_EAA_PRIV) && msr_pr) { /* Insufficient Privilege */
        *prot = 0;
    } else if (msr_pr || (pte & R_PTE_EAA_PRIV)) {
        *prot = ppc_radix64_get_prot_eaa(pte);
    } else { /* !msr_pr && !(pte & R_PTE_EAA_PRIV) */
        *prot = ppc_radix64_get_prot_eaa(pte);
        *prot &= ppc_radix64_get_prot_amr(cpu); /* Least combined permissions */
    }

    /* Check if requested access type is allowed */
    if (need_prot[rwx] & ~(*prot)) { /* Page Protected for that Access */
        *fault_cause |= DSISR_PROTFAULT;
        return true;
    }

    return false;
}
 141
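/*
 * Reference and Change bit update.  R is set on every access and C only on
 * a store; to catch the first store to a page that was initially mapped for
 * a read, the page is entered into the TLB without PAGE_WRITE so that the
 * store faults back through this path and sets C.  The updated PTE is
 * simply written back with stq_phys().
 */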
static void ppc_radix64_set_rc(PowerPCCPU *cpu, int rwx, uint64_t pte,
                               hwaddr pte_addr, int *prot)
{
    CPUState *cs = CPU(cpu);
    uint64_t npte;

    npte = pte | R_PTE_R; /* Always set reference bit */

    if (rwx == 1) { /* Store/Write */
        npte |= R_PTE_C; /* Set change bit */
    } else {
        /*
         * Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit.
         */
        *prot &= ~PAGE_WRITE;
    }

    if (pte ^ npte) { /* If pte has changed then write it back */
        stq_phys(cs->as, pte_addr, npte);
    }
}
 164
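/*
 * One recursive step per radix level: nls is the log2 number of entries in
 * the current directory and *psize the number of EA bits still to be
 * translated, so the index is taken from the top nls of those bits and
 * *psize shrinks by nls at each level until a leaf supplies the page
 * offset.  As an illustration (this geometry comes from a typical Linux
 * guest, not from this file): with RTS = 52 and 4 KiB pages the levels use
 * 13, 9, 9 and 9 bits, so the walk computes
 *     level 0: index = ea >> 39 & 0x1fff, psize 52 -> 39
 *     level 1: index = ea >> 30 & 0x1ff,  psize 39 -> 30
 *     level 2: index = ea >> 21 & 0x1ff,  psize 30 -> 21
 *     level 3: index = ea >> 12 & 0x1ff,  psize 21 -> 12
 * leaving a 12-bit page offset that is OR-ed into the RPN.
 */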
static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, vaddr eaddr,
                                      uint64_t base_addr, uint64_t nls,
                                      hwaddr *raddr, int *psize,
                                      int *fault_cause, hwaddr *pte_addr)
{
    CPUState *cs = CPU(cpu);
    uint64_t index, pde;

    if (nls < 5) { /* Directory maps less than 2**5 entries */
        *fault_cause |= DSISR_R_BADCONFIG;
        return 0;
    }

    /* Read page <directory/table> entry from guest address space */
    index = eaddr >> (*psize - nls); /* Shift */
    index &= ((1UL << nls) - 1); /* Mask */
    pde = ldq_phys(cs->as, base_addr + (index * sizeof(pde)));
    if (!(pde & R_PTE_VALID)) { /* Invalid Entry */
        *fault_cause |= DSISR_NOPTE;
        return 0;
    }

    *psize -= nls;

    /* Check if Leaf Entry -> Page Table Entry -> Stop the Search */
    if (pde & R_PTE_LEAF) {
        uint64_t rpn = pde & R_PTE_RPN;
        uint64_t mask = (1UL << *psize) - 1;

        /* Or high bits of rpn and low bits to ea to form whole real addr */
        *raddr = (rpn & ~mask) | (eaddr & mask);
        *pte_addr = base_addr + (index * sizeof(pde));
        return pde;
    }

    /* Next Level of Radix Tree */
    return ppc_radix64_walk_tree(cpu, eaddr, pde & R_PDE_NLB, pde & R_PDE_NLS,
                                 raddr, psize, fault_cause, pte_addr);
}
 204
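/*
 * Minimal sanity check of the Partition Table Entry fetched for this
 * translation: dw0 must have the Host Radix bit set (this file only handles
 * radix trees) and partition 0 is only valid when running with MSR[HV].
 * Other fields are not validated here, as the comment below notes.
 */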
static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate)
{
    CPUPPCState *env = &cpu->env;

    if (!(pate->dw0 & PATE0_HR)) {
        return false;
    }
    if (lpid == 0 && !msr_hv) {
        return false;
    }
    /* More checks ... */
    return true;
}
 218
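/*
 * Full radix fault handler: resolve the quadrant to an (LPID, PID) pair,
 * fetch and validate the Partition Table Entry, index the process table by
 * PID to find the root of the process-scoped radix tree, walk that tree,
 * check protection, update R/C and finally install the mapping in QEMU's
 * TLB.  Real-mode accesses bypass all of this and map the EA 1:1 (plus
 * HRMOR when applicable).  Returns 0 on success and 1 when an interrupt has
 * been raised instead.
 */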
int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
                                 int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    PPCVirtualHypervisorClass *vhc;
    hwaddr raddr, pte_addr;
    uint64_t lpid = 0, pid = 0, offset, size, prtbe0, pte;
    int page_size, prot, fault_cause = 0;
    ppc_v3_pate_t pate;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));

    /* HV or virtual hypervisor Real Mode Access */
    if ((msr_hv || cpu->vhyp) &&
        (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0)))) {
        /* In real mode top 4 effective addr bits (mostly) ignored */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        /* In HV mode, add HRMOR if top EA bit is clear */
        if (msr_hv || !env->has_hv_mode) {
            if (!(eaddr >> 63)) {
                raddr |= env->spr[SPR_HRMOR];
            }
        }
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /*
     * Check UPRT (we avoid the check in real mode to deal with
     * transitional states during kexec).
     */
    if (!ppc64_use_proc_tbl(cpu)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "LPCR:UPRT not set in radix mode ! LPCR="
                      TARGET_FMT_lx "\n", env->spr[SPR_LPCR]);
    }

    /* Virtual Mode Access - get the fully qualified address */
    if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) {
        ppc_radix64_raise_segi(cpu, rwx, eaddr);
        return 1;
    }

    /* Get Process Table */
    if (cpu->vhyp) {
        vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->get_pate(cpu->vhyp, &pate);
    } else {
        if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
            ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
            return 1;
        }
        if (!validate_pate(cpu, lpid, &pate)) {
            ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_R_BADCONFIG);
            return 1;
        }
        /* We don't support guest mode yet */
        if (lpid != 0) {
            error_report("PowerNV guest support Unimplemented");
            exit(1);
        }
    }
 284
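    /*
     * Each process table entry is two doublewords; only the first (prtbe0,
     * holding the tree geometry and root) is needed for the radix walk.
     * PATE1_R_PRTS gives the table size as a power of two of at least
     * 4 KiB, hence the "+ 12" below.
     */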
    /* Index Process Table by PID to Find Corresponding Process Table Entry */
    offset = pid * sizeof(struct prtb_entry);
    size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
    if (offset >= size) {
        /* offset exceeds size of the process table */
        ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
        return 1;
    }
    prtbe0 = ldq_phys(cs->as, (pate.dw1 & PATE1_R_PRTB) + offset);
 294
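    /*
     * Note that page_size is seeded with the Radix Tree Size (the total
     * number of EA bits translated by the tree) rather than an actual page
     * size; ppc_radix64_walk_tree() decrements it level by level so that,
     * on return, it holds the log2 page size of the leaf that mapped the
     * address.
     */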
    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
    page_size = PRTBE_R_GET_RTS(prtbe0);
    pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
                                prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
                                &raddr, &page_size, &fault_cause, &pte_addr);
    if (!pte || ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, &prot)) {
        /* Couldn't get pte or access denied due to protection */
        ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
        return 1;
    }

    /* Update Reference and Change Bits */
    ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, &prot);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, 1UL << page_size);
    return 0;
}
 313
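/*
 * Debug (gdbstub/monitor) address translation: the same lookup as the fault
 * handler above, but it must not have side effects, so no interrupt is
 * raised, protection is not checked and the R/C bits are left untouched;
 * any failure is reported as -1.
 */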
hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    PPCVirtualHypervisorClass *vhc;
    hwaddr raddr, pte_addr;
    uint64_t lpid = 0, pid = 0, offset, size, prtbe0, pte;
    int page_size, fault_cause = 0;
    ppc_v3_pate_t pate;

    /* Handle Real Mode */
    if (msr_dr == 0) {
        /* In real mode top 4 effective addr bits (mostly) ignored */
        return eaddr & 0x0FFFFFFFFFFFFFFFULL;
    }

    /* Virtual Mode Access - get the fully qualified address */
    if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) {
        return -1;
    }

    /* Get Process Table */
    if (cpu->vhyp) {
        vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->get_pate(cpu->vhyp, &pate);
    } else {
        if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
            return -1;
        }
        if (!validate_pate(cpu, lpid, &pate)) {
            return -1;
        }
        /* We don't support guest mode yet */
        if (lpid != 0) {
            error_report("PowerNV guest support Unimplemented");
            exit(1);
        }
    }

    /* Index Process Table by PID to Find Corresponding Process Table Entry */
    offset = pid * sizeof(struct prtb_entry);
    size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
    if (offset >= size) {
        /* offset exceeds size of the process table */
        return -1;
    }
    prtbe0 = ldq_phys(cs->as, (pate.dw1 & PATE1_R_PRTB) + offset);

    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
    page_size = PRTBE_R_GET_RTS(prtbe0);
    pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
                                prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
                                &raddr, &page_size, &fault_cause, &pte_addr);
    if (!pte) {
        return -1;
    }

    return raddr & TARGET_PAGE_MASK;
}
 373