qemu/target-sparc/mmu_helper.c
/*
 *  Sparc MMU helpers
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "trace.h"
#include "exec/address-spaces.h"

/* Sparc MMU emulation */
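
/*
 * A note on the 'rw' argument used throughout this file (an observation
 * from the call sites below, not an exhaustive definition): 0 is a data
 * read, 1 a data write, 2 an instruction fetch and 4 a no-fault data
 * access as issued by cpu_get_phys_page_nofault().
 */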

#if defined(CONFIG_USER_ONLY)

int cpu_sparc_handle_mmu_fault(CPUSPARCState *env1, target_ulong address, int rw,
                               int mmu_idx)
{
    if (rw & 2) {
        env1->exception_index = TT_TFAULT;
    } else {
        env1->exception_index = TT_DFAULT;
    }
    return 1;
}

#else

#ifndef TARGET_SPARC64
/*
 * Sparc V8 Reference MMU (SRMMU)
 */
static const int access_table[8][8] = {
    { 0, 0, 0, 0, 8, 0, 12, 12 },
    { 0, 0, 0, 0, 8, 0, 0, 0 },
    { 8, 8, 0, 0, 0, 8, 12, 12 },
    { 8, 8, 0, 0, 0, 8, 0, 0 },
    { 8, 0, 8, 0, 8, 8, 12, 12 },
    { 8, 0, 8, 0, 8, 0, 8, 0 },
    { 8, 8, 8, 0, 8, 8, 12, 12 },
    { 8, 8, 8, 0, 8, 8, 8, 0 }
};

static const int perm_table[2][8] = {
    {
        PAGE_READ,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        PAGE_EXEC,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC
    },
    {
        PAGE_READ,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        PAGE_EXEC,
        PAGE_READ,
        0,
        0,
    }
};
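
/*
 * Reading aid (added comment, not from the original source): access_table
 * is indexed first by the access_index built in get_physical_address()
 * (bit 2: write, bit 1: instruction fetch, bit 0: supervisor) and then by
 * the ACC field of the PTE.  A zero entry means the access is allowed;
 * non-zero entries appear to be SRMMU fault types already shifted into the
 * FT position (bits 4:2) of the fault status register, e.g. 8 is a
 * protection error and 12 a privilege violation.  perm_table maps the same
 * ACC values to QEMU page protection bits, row 0 for supervisor and row 1
 * for user accesses.
 */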

static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
                                int *prot, int *access_index,
                                target_ulong address, int rw, int mmu_idx,
                                target_ulong *page_size)
{
    int access_perms = 0;
    hwaddr pde_ptr;
    uint32_t pde;
    int error_code = 0, is_dirty, is_user;
    unsigned long page_offset;

    is_user = mmu_idx == MMU_USER_IDX;

    if ((env->mmuregs[0] & MMU_E) == 0) { /* MMU disabled */
        *page_size = TARGET_PAGE_SIZE;
        /* Boot mode: instruction fetches are taken from PROM */
        if (rw == 2 && (env->mmuregs[0] & env->def->mmu_bm)) {
            *physical = env->prom_addr | (address & 0x7ffffULL);
            *prot = PAGE_READ | PAGE_EXEC;
            return 0;
        }
        *physical = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }

    *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1);
    *physical = 0xffffffffffff0000ULL;

    /* SPARC reference MMU table walk: Context table->L1->L2->PTE */
    /* Context base + context number */
    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
    pde = ldl_phys(pde_ptr);

    /* Ctx pde */
    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
        return 1 << 2;
    case 2: /* L0 PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 4 << 2;
    case 1: /* L0 PDE */
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = ldl_phys(pde_ptr);

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
            return (1 << 8) | (1 << 2);
        case 3: /* Reserved */
            return (1 << 8) | (4 << 2);
        case 1: /* L1 PDE */
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = ldl_phys(pde_ptr);

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
                return (2 << 8) | (1 << 2);
            case 3: /* Reserved */
                return (2 << 8) | (4 << 2);
            case 1: /* L2 PDE */
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = ldl_phys(pde_ptr);

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                    return (3 << 8) | (1 << 2);
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return (3 << 8) | (4 << 2);
                case 2: /* L3 PTE */
                    page_offset = 0;
                }
                *page_size = TARGET_PAGE_SIZE;
                break;
            case 2: /* L2 PTE */
                page_offset = address & 0x3f000;
                *page_size = 0x40000;
            }
            break;
        case 2: /* L1 PTE */
            page_offset = address & 0xfff000;
            *page_size = 0x1000000;
        }
    }

    /* check access */
    access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT;
    error_code = access_table[*access_index][access_perms];
    if (error_code && !((env->mmuregs[0] & MMU_NF) && is_user)) {
        return error_code;
    }

    /* update page modified and dirty bits */
    is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK);
    if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
        pde |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pde |= PG_MODIFIED_MASK;
        }
        stl_phys_notdirty(pde_ptr, pde);
    }

    /* the page can be put in the TLB */
    *prot = perm_table[is_user][access_perms];
    if (!(pde & PG_MODIFIED_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        *prot &= ~PAGE_WRITE;
    }

    /* Even with large PTEs, we map only one 4KB page in the cache to
       avoid filling it too fast */
    *physical = ((hwaddr)(pde & PTE_ADDR_MASK) << 4) + page_offset;
    return error_code;
}

/* Perform address translation */
int cpu_sparc_handle_mmu_fault(CPUSPARCState *env, target_ulong address, int rw,
                               int mmu_idx)
{
    hwaddr paddr;
    target_ulong vaddr;
    target_ulong page_size;
    int error_code = 0, prot, access_index;

    address &= TARGET_PAGE_MASK;
    error_code = get_physical_address(env, &paddr, &prot, &access_index,
                                      address, rw, mmu_idx, &page_size);
    vaddr = address;
    if (error_code == 0) {
#ifdef DEBUG_MMU
        printf("Translate at " TARGET_FMT_lx " -> " TARGET_FMT_plx ", vaddr "
               TARGET_FMT_lx "\n", address, paddr, vaddr);
#endif
        tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
        return 0;
    }

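    /*
     * Added note: the value built below appears to follow the SPARC V8
     * fault status register layout: bit 0 is the overflow bit, bit 1 marks
     * the fault address register as valid, bits 4:2 carry the fault type
     * returned by get_physical_address(), bits 7:5 the access type
     * (access_index) and bits 9:8 the table level at which the walk failed.
     */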
    if (env->mmuregs[3]) { /* Fault status register */
        env->mmuregs[3] = 1; /* overflow (not read before another fault) */
    }
    env->mmuregs[3] |= (access_index << 5) | error_code | 2;
    env->mmuregs[4] = address; /* Fault address register */

    if ((env->mmuregs[0] & MMU_NF) || env->psret == 0)  {
        /* No fault mode: if a mapping is available, just override
           permissions. If no mapping is available, redirect accesses to
           neverland. Fake/overridden mappings will be flushed when
           switching to normal mode. */
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        tlb_set_page(env, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE);
        return 0;
    } else {
        if (rw & 2) {
            env->exception_index = TT_TFAULT;
        } else {
            env->exception_index = TT_DFAULT;
        }
        return 1;
    }
}

target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev)
{
    hwaddr pde_ptr;
    uint32_t pde;

    /* Context base + context number */
    pde_ptr = (hwaddr)(env->mmuregs[1] << 4) +
        (env->mmuregs[2] << 2);
    pde = ldl_phys(pde_ptr);

    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
    case 2: /* PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 0;
    case 1: /* L1 PDE */
        if (mmulev == 3) {
            return pde;
        }
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = ldl_phys(pde_ptr);

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
        case 3: /* Reserved */
            return 0;
        case 2: /* L1 PTE */
            return pde;
        case 1: /* L2 PDE */
            if (mmulev == 2) {
                return pde;
            }
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = ldl_phys(pde_ptr);

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
            case 3: /* Reserved */
                return 0;
            case 2: /* L2 PTE */
                return pde;
            case 1: /* L3 PDE */
                if (mmulev == 1) {
                    return pde;
                }
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = ldl_phys(pde_ptr);

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return 0;
                case 2: /* L3 PTE */
                    return pde;
                }
            }
        }
    }
    return 0;
}

void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env)
{
    CPUState *cs = CPU(sparc_env_get_cpu(env));
    target_ulong va, va1, va2;
    unsigned int n, m, o;
    hwaddr pde_ptr, pa;
    uint32_t pde;

    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
    pde = ldl_phys(pde_ptr);
    (*cpu_fprintf)(f, "Root ptr: " TARGET_FMT_plx ", ctx: %d\n",
                   (hwaddr)env->mmuregs[1] << 4, env->mmuregs[2]);
    for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) {
        pde = mmu_probe(env, va, 2);
        if (pde) {
            pa = cpu_get_phys_page_debug(cs, va);
            (*cpu_fprintf)(f, "VA: " TARGET_FMT_lx ", PA: " TARGET_FMT_plx
                           " PDE: " TARGET_FMT_lx "\n", va, pa, pde);
            for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) {
                pde = mmu_probe(env, va1, 1);
                if (pde) {
                    pa = cpu_get_phys_page_debug(cs, va1);
                    (*cpu_fprintf)(f, " VA: " TARGET_FMT_lx ", PA: "
                                   TARGET_FMT_plx " PDE: " TARGET_FMT_lx "\n",
                                   va1, pa, pde);
                    for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) {
                        pde = mmu_probe(env, va2, 0);
                        if (pde) {
                            pa = cpu_get_phys_page_debug(cs, va2);
                            (*cpu_fprintf)(f, "  VA: " TARGET_FMT_lx ", PA: "
                                           TARGET_FMT_plx " PTE: "
                                           TARGET_FMT_lx "\n",
                                           va2, pa, pde);
                        }
                    }
                }
            }
        }
    }
}

/* GDB expects all register windows to be flushed in RAM. This function handles
 * reads (and only reads) in stack frames as if the windows were flushed. We
 * assume that the SPARC ABI is followed.
 */
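/*
 * Background note (added comment): under the SPARC ABI, each frame's stack
 * pointer points at a 64-byte save area for that window's %l0-%l7 and
 * %i0-%i7, and the callee sees the same address through its frame pointer
 * (%i6, regbase[cwp * 16 + 22] below).  The loop below follows the frame
 * pointers from window to window and, for addresses that fall inside such a
 * save area, serves the read from the in-CPU window registers instead of
 * from memory.
 */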
int sparc_cpu_memory_rw_debug(CPUState *cs, vaddr address,
                              uint8_t *buf, int len, bool is_write)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    target_ulong addr = address;
    int i;
    int len1;
    int cwp = env->cwp;

    if (!is_write) {
        for (i = 0; i < env->nwindows; i++) {
            int off;
            target_ulong fp = env->regbase[cwp * 16 + 22];

            /* Assume fp == 0 means end of frame.  */
            if (fp == 0) {
                break;
            }

            cwp = cpu_cwp_inc(env, cwp + 1);

            /* Invalid window ? */
            if (env->wim & (1 << cwp)) {
                break;
            }

            /* According to the ABI, the stack is growing downward.  */
            if (addr + len < fp) {
                break;
            }

            /* Not in this frame.  */
            if (addr > fp + 64) {
                continue;
            }

            /* Handle access before this window.  */
            if (addr < fp) {
                len1 = fp - addr;
                if (cpu_memory_rw_debug(cs, addr, buf, len1, is_write) != 0) {
                    return -1;
                }
                addr += len1;
                len -= len1;
                buf += len1;
            }

            /* Access byte per byte to registers. Not very efficient but speed
             * is not critical.
             */
            off = addr - fp;
            len1 = 64 - off;

            if (len1 > len) {
                len1 = len;
            }

            for (; len1; len1--) {
                int reg = cwp * 16 + 8 + (off >> 2);
                union {
                    uint32_t v;
                    uint8_t c[4];
                } u;
                u.v = cpu_to_be32(env->regbase[reg]);
                *buf++ = u.c[off & 3];
                addr++;
                len--;
                off++;
            }

            if (len == 0) {
                return 0;
            }
        }
    }
    return cpu_memory_rw_debug(cs, addr, buf, len, is_write);
}

#else /* !TARGET_SPARC64 */

/* 41 bit physical address space */
static inline hwaddr ultrasparc_truncate_physical(uint64_t x)
{
    return x & 0x1ffffffffffULL;
}

/*
 * UltraSparc IIi I/DMMUs
 */

/* Returns true if the TTE tag is valid and matches the virtual address and
   context; the comparison uses a virtual address mask derived from the TTE
   page size */
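/* Worked example (added comment): a TTE with page size field 1 covers a
   64 KB page, so the mask below is 0xffffffffffff0000ULL; the tag is
   compared under that mask and the low 16 bits of the virtual address are
   carried over unchanged into the physical address. */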
static inline int ultrasparc_tag_match(SparcTLBEntry *tlb,
                                       uint64_t address, uint64_t context,
                                       hwaddr *physical)
{
    uint64_t mask;

    switch (TTE_PGSIZE(tlb->tte)) {
    default:
    case 0x0: /* 8k */
        mask = 0xffffffffffffe000ULL;
        break;
    case 0x1: /* 64k */
        mask = 0xffffffffffff0000ULL;
        break;
    case 0x2: /* 512k */
        mask = 0xfffffffffff80000ULL;
        break;
    case 0x3: /* 4M */
        mask = 0xffffffffffc00000ULL;
        break;
    }

    /* valid, context match, virtual address match? */
    if (TTE_IS_VALID(tlb->tte) &&
        (TTE_IS_GLOBAL(tlb->tte) || tlb_compare_context(tlb, context))
        && compare_masked(address, tlb->tag, mask)) {
        /* decode physical address */
        *physical = ((tlb->tte & mask) | (address & ~mask)) & 0x1ffffffe000ULL;
        return 1;
    }

    return 0;
}

static int get_physical_address_data(CPUSPARCState *env,
                                     hwaddr *physical, int *prot,
                                     target_ulong address, int rw, int mmu_idx)
{
    unsigned int i;
    uint64_t context;
    uint64_t sfsr = 0;

    int is_user = (mmu_idx == MMU_USER_IDX ||
                   mmu_idx == MMU_USER_SECONDARY_IDX);

    if ((env->lsu & DMMU_E) == 0) { /* DMMU disabled */
        *physical = ultrasparc_truncate_physical(address);
        *prot = PAGE_READ | PAGE_WRITE;
        return 0;
    }

    switch (mmu_idx) {
    case MMU_USER_IDX:
    case MMU_KERNEL_IDX:
        context = env->dmmu.mmu_primary_context & 0x1fff;
        sfsr |= SFSR_CT_PRIMARY;
        break;
    case MMU_USER_SECONDARY_IDX:
    case MMU_KERNEL_SECONDARY_IDX:
        context = env->dmmu.mmu_secondary_context & 0x1fff;
        sfsr |= SFSR_CT_SECONDARY;
        break;
    case MMU_NUCLEUS_IDX:
        sfsr |= SFSR_CT_NUCLEUS;
        /* FALLTHRU */
    default:
        context = 0;
        break;
    }

    if (rw == 1) {
        sfsr |= SFSR_WRITE_BIT;
    } else if (rw == 4) {
        sfsr |= SFSR_NF_BIT;
    }

    for (i = 0; i < 64; i++) {
        /* ctx match, vaddr match, valid? */
        if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) {
            int do_fault = 0;

            /* access ok? */
            /* multiple bits in SFSR.FT may be set on TT_DFAULT */
            if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) {
                do_fault = 1;
                sfsr |= SFSR_FT_PRIV_BIT; /* privilege violation */
                trace_mmu_helper_dfault(address, context, mmu_idx, env->tl);
            }
            if (rw == 4) {
                if (TTE_IS_SIDEEFFECT(env->dtlb[i].tte)) {
                    do_fault = 1;
                    sfsr |= SFSR_FT_NF_E_BIT;
                }
            } else {
                if (TTE_IS_NFO(env->dtlb[i].tte)) {
                    do_fault = 1;
                    sfsr |= SFSR_FT_NFO_BIT;
                }
            }

            if (do_fault) {
                /* faults above are reported with TT_DFAULT. */
                env->exception_index = TT_DFAULT;
            } else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) {
                do_fault = 1;
                env->exception_index = TT_DPROT;

                trace_mmu_helper_dprot(address, context, mmu_idx, env->tl);
            }

            if (!do_fault) {
                *prot = PAGE_READ;
                if (TTE_IS_W_OK(env->dtlb[i].tte)) {
                    *prot |= PAGE_WRITE;
                }

                TTE_SET_USED(env->dtlb[i].tte);

                return 0;
            }

            if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */
                sfsr |= SFSR_OW_BIT; /* overflow (not read before
                                        another fault) */
            }

            if (env->pstate & PS_PRIV) {
                sfsr |= SFSR_PR_BIT;
            }

            /* FIXME: ASI field in SFSR must be set */
            env->dmmu.sfsr = sfsr | SFSR_VALID_BIT;

            env->dmmu.sfar = address; /* Fault address register */

            env->dmmu.tag_access = (address & ~0x1fffULL) | context;

            return 1;
        }
    }

    trace_mmu_helper_dmiss(address, context);

    /*
     * On MMU misses:
     * - UltraSPARC IIi: SFSR and SFAR unmodified
     * - JPS1: SFAR updated and some fields of SFSR updated
     */
    env->dmmu.tag_access = (address & ~0x1fffULL) | context;
    env->exception_index = TT_DMISS;
    return 1;
}

static int get_physical_address_code(CPUSPARCState *env,
                                     hwaddr *physical, int *prot,
                                     target_ulong address, int mmu_idx)
{
    unsigned int i;
    uint64_t context;

    int is_user = (mmu_idx == MMU_USER_IDX ||
                   mmu_idx == MMU_USER_SECONDARY_IDX);

    if ((env->lsu & IMMU_E) == 0 || (env->pstate & PS_RED) != 0) {
        /* IMMU disabled */
        *physical = ultrasparc_truncate_physical(address);
        *prot = PAGE_EXEC;
        return 0;
    }

    if (env->tl == 0) {
        /* PRIMARY context */
        context = env->dmmu.mmu_primary_context & 0x1fff;
    } else {
        /* NUCLEUS context */
        context = 0;
    }

    for (i = 0; i < 64; i++) {
        /* ctx match, vaddr match, valid? */
        if (ultrasparc_tag_match(&env->itlb[i],
                                 address, context, physical)) {
            /* access ok? */
            if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) {
                /* Fault status register */
                if (env->immu.sfsr & SFSR_VALID_BIT) {
                    env->immu.sfsr = SFSR_OW_BIT; /* overflow (not read before
                                                     another fault) */
                } else {
                    env->immu.sfsr = 0;
                }
                if (env->pstate & PS_PRIV) {
                    env->immu.sfsr |= SFSR_PR_BIT;
                }
                if (env->tl > 0) {
                    env->immu.sfsr |= SFSR_CT_NUCLEUS;
                }

                /* FIXME: ASI field in SFSR must be set */
                env->immu.sfsr |= SFSR_FT_PRIV_BIT | SFSR_VALID_BIT;
                env->exception_index = TT_TFAULT;

                env->immu.tag_access = (address & ~0x1fffULL) | context;

                trace_mmu_helper_tfault(address, context);

                return 1;
            }
            *prot = PAGE_EXEC;
            TTE_SET_USED(env->itlb[i].tte);
            return 0;
        }
    }

    trace_mmu_helper_tmiss(address, context);

    /* Context is stored in DMMU (dmmuregs[1]) also for IMMU */
    env->immu.tag_access = (address & ~0x1fffULL) | context;
    env->exception_index = TT_TMISS;
    return 1;
}

static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
                                int *prot, int *access_index,
                                target_ulong address, int rw, int mmu_idx,
                                target_ulong *page_size)
{
    /* ??? We treat everything as a small page, then explicitly flush
       everything when an entry is evicted.  */
    *page_size = TARGET_PAGE_SIZE;

    /* safety net to catch wrong softmmu index use from dynamic code */
    if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) {
        if (rw == 2) {
            trace_mmu_helper_get_phys_addr_code(env->tl, mmu_idx,
                                                env->dmmu.mmu_primary_context,
                                                env->dmmu.mmu_secondary_context,
                                                address);
        } else {
            trace_mmu_helper_get_phys_addr_data(env->tl, mmu_idx,
                                                env->dmmu.mmu_primary_context,
                                                env->dmmu.mmu_secondary_context,
                                                address);
        }
    }

    if (rw == 2) {
        return get_physical_address_code(env, physical, prot, address,
                                         mmu_idx);
    } else {
        return get_physical_address_data(env, physical, prot, address, rw,
                                         mmu_idx);
    }
}

/* Perform address translation */
int cpu_sparc_handle_mmu_fault(CPUSPARCState *env, target_ulong address, int rw,
                               int mmu_idx)
{
    target_ulong vaddr;
    hwaddr paddr;
    target_ulong page_size;
    int error_code = 0, prot, access_index;

    address &= TARGET_PAGE_MASK;
    error_code = get_physical_address(env, &paddr, &prot, &access_index,
                                      address, rw, mmu_idx, &page_size);
    if (error_code == 0) {
        vaddr = address;

        trace_mmu_helper_mmu_fault(address, paddr, mmu_idx, env->tl,
                                   env->dmmu.mmu_primary_context,
                                   env->dmmu.mmu_secondary_context);

        tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
        return 0;
    }
    /* XXX */
    return 1;
}

void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env)
{
    unsigned int i;
    const char *mask;

    (*cpu_fprintf)(f, "MMU contexts: Primary: %" PRId64 ", Secondary: %"
                   PRId64 "\n",
                   env->dmmu.mmu_primary_context,
                   env->dmmu.mmu_secondary_context);
    if ((env->lsu & DMMU_E) == 0) {
        (*cpu_fprintf)(f, "DMMU disabled\n");
    } else {
        (*cpu_fprintf)(f, "DMMU dump\n");
        for (i = 0; i < 64; i++) {
            switch (TTE_PGSIZE(env->dtlb[i].tte)) {
            default:
            case 0x0:
                mask = "  8k";
                break;
            case 0x1:
                mask = " 64k";
                break;
            case 0x2:
                mask = "512k";
                break;
            case 0x3:
                mask = "  4M";
                break;
            }
            if (TTE_IS_VALID(env->dtlb[i].tte)) {
                (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %llx"
                               ", %s, %s, %s, %s, ctx %" PRId64 " %s\n",
                               i,
                               env->dtlb[i].tag & (uint64_t)~0x1fffULL,
                               TTE_PA(env->dtlb[i].tte),
                               mask,
                               TTE_IS_PRIV(env->dtlb[i].tte) ? "priv" : "user",
                               TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO",
                               TTE_IS_LOCKED(env->dtlb[i].tte) ?
                               "locked" : "unlocked",
                               env->dtlb[i].tag & (uint64_t)0x1fffULL,
                               TTE_IS_GLOBAL(env->dtlb[i].tte) ?
                               "global" : "local");
            }
        }
    }
    if ((env->lsu & IMMU_E) == 0) {
        (*cpu_fprintf)(f, "IMMU disabled\n");
    } else {
        (*cpu_fprintf)(f, "IMMU dump\n");
        for (i = 0; i < 64; i++) {
            switch (TTE_PGSIZE(env->itlb[i].tte)) {
            default:
            case 0x0:
                mask = "  8k";
                break;
            case 0x1:
                mask = " 64k";
                break;
            case 0x2:
                mask = "512k";
                break;
            case 0x3:
                mask = "  4M";
                break;
            }
            if (TTE_IS_VALID(env->itlb[i].tte)) {
                (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %llx"
                               ", %s, %s, %s, ctx %" PRId64 " %s\n",
                               i,
                               env->itlb[i].tag & (uint64_t)~0x1fffULL,
                               TTE_PA(env->itlb[i].tte),
                               mask,
                               TTE_IS_PRIV(env->itlb[i].tte) ? "priv" : "user",
                               TTE_IS_LOCKED(env->itlb[i].tte) ?
                               "locked" : "unlocked",
                               env->itlb[i].tag & (uint64_t)0x1fffULL,
                               TTE_IS_GLOBAL(env->itlb[i].tte) ?
                               "global" : "local");
            }
        }
    }
}

#endif /* TARGET_SPARC64 */

static int cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys,
                                   target_ulong addr, int rw, int mmu_idx)
{
    target_ulong page_size;
    int prot, access_index;

    return get_physical_address(env, phys, &prot, &access_index, addr, rw,
                                mmu_idx, &page_size);
}

#if defined(TARGET_SPARC64)
hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr,
                                           int mmu_idx)
{
    hwaddr phys_addr;

    if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 4, mmu_idx) != 0) {
        return -1;
    }
    return phys_addr;
}
#endif

hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    hwaddr phys_addr;
    int mmu_idx = cpu_mmu_index(env);
    MemoryRegionSection section;

    if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) {
        if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) {
            return -1;
        }
    }
    section = memory_region_find(get_system_memory(), phys_addr, 1);
    memory_region_unref(section.mr);
    if (!int128_nz(section.size)) {
        return -1;
    }
    return phys_addr;
}
#endif