qemu/target/microblaze/mmu.c
/*
 *  Microblaze MMU emulation for qemu.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"

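/* Decode the SIZE field of a TLB tag entry into the page size in bytes
   (1K up to 16M).  */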
static unsigned int tlb_decode_size(unsigned int f)
{
    static const unsigned int sizes[] = {
        1 * 1024, 4 * 1024, 16 * 1024, 64 * 1024, 256 * 1024,
        1 * 1024 * 1024, 4 * 1024 * 1024, 16 * 1024 * 1024
    };
    assert(f < ARRAY_SIZE(sizes));
    return sizes[f];
}

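/* Flush QEMU's TLB for every target page covered by UTLB entry idx.  */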
static void mmu_flush_idx(CPUMBState *env, unsigned int idx)
{
    CPUState *cs = env_cpu(env);
    MicroBlazeMMU *mmu = &env->mmu;
    unsigned int tlb_size;
    uint32_t tlb_tag, end, t;

    t = mmu->rams[RAM_TAG][idx];
    if (!(t & TLB_VALID))
        return;

    tlb_tag = t & TLB_EPN_MASK;
    tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
    end = tlb_tag + tlb_size;

    while (tlb_tag < end) {
        tlb_flush_page(cs, tlb_tag);
        tlb_tag += TARGET_PAGE_SIZE;
    }
}

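/* Called when the PID register is about to change; flush the QEMU TLB for
   every valid UTLB entry tagged with the outgoing PID.  */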
static void mmu_change_pid(CPUMBState *env, unsigned int newpid)
{
    MicroBlazeMMU *mmu = &env->mmu;
    unsigned int i;
    uint32_t t;

    if (newpid & ~0xff)
        qemu_log_mask(LOG_GUEST_ERROR, "Illegal rpid=%x\n", newpid);

    for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
        /* Lookup and decode.  */
        t = mmu->rams[RAM_TAG][i];
        if (t & TLB_VALID) {
            if (mmu->tids[i] && ((mmu->regs[MMU_R_PID] & 0xff) == mmu->tids[i]))
                mmu_flush_idx(env, i);
        }
    }
}

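/* Translate vaddr against the UTLB.  On a hit, fill in *lu and return 1.
   On a miss or protection violation, lu->err says why and 0 is returned.  */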
/* rw - 0 = read, 1 = write, 2 = fetch.  */
unsigned int mmu_translate(MicroBlazeCPU *cpu, MicroBlazeMMULookup *lu,
                           target_ulong vaddr, MMUAccessType rw, int mmu_idx)
{
    MicroBlazeMMU *mmu = &cpu->env.mmu;
    unsigned int i, hit = 0;
    unsigned int tlb_ex = 0, tlb_wr = 0, tlb_zsel;
    uint64_t tlb_tag, tlb_rpn, mask;
    uint32_t tlb_size, t0;

    lu->err = ERR_MISS;
    for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
        uint64_t t, d;

        /* Lookup and decode.  */
        t = mmu->rams[RAM_TAG][i];
        if (t & TLB_VALID) {
            tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
            if (tlb_size < TARGET_PAGE_SIZE) {
                qemu_log_mask(LOG_UNIMP, "%d pages not supported\n", tlb_size);
                abort();
            }

            mask = ~((uint64_t)tlb_size - 1);
            tlb_tag = t & TLB_EPN_MASK;
            if ((vaddr & mask) != (tlb_tag & mask)) {
                continue;
            }
            if (mmu->tids[i]
                && ((mmu->regs[MMU_R_PID] & 0xff) != mmu->tids[i])) {
                continue;
            }

            /* Bring in the data part.  */
            d = mmu->rams[RAM_DATA][i];
            tlb_ex = d & TLB_EX;
            tlb_wr = d & TLB_WR;

            /* Now let's see if there is a zone that overrides the protbits.  */
            tlb_zsel = (d >> 4) & 0xf;
            t0 = mmu->regs[MMU_R_ZPR] >> (30 - (tlb_zsel * 2));
            t0 &= 0x3;

            if (tlb_zsel > cpu->cfg.mmu_zones) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "tlb zone select out of range! %d\n", tlb_zsel);
                t0 = 1; /* Ignore.  */
            }

            if (cpu->cfg.mmu == 1) {
                t0 = 1; /* Zones are disabled.  */
            }

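            /* Zone classes: 0 - no access in user mode, 1 - use the TLB
               protection bits unmodified, 2 - kernel gets full access,
               3 - full access for everyone.  */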
            switch (t0) {
                case 0:
                    if (mmu_idx == MMU_USER_IDX)
                        continue;
                    break;
                case 2:
                    if (mmu_idx != MMU_USER_IDX) {
                        tlb_ex = 1;
                        tlb_wr = 1;
                    }
                    break;
                case 3:
                    tlb_ex = 1;
                    tlb_wr = 1;
                    break;
                default: break;
            }

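            /* Start out assuming a protection fault: read is always
               allowed, write and exec depend on the (possibly
               zone-overridden) protection bits.  Bail out early if the
               requested access is not permitted.  */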
            lu->err = ERR_PROT;
            lu->prot = PAGE_READ;
            if (tlb_wr)
                lu->prot |= PAGE_WRITE;
            else if (rw == 1)
                goto done;
            if (tlb_ex)
                lu->prot |= PAGE_EXEC;
            else if (rw == 2) {
                goto done;
            }

            tlb_rpn = d & TLB_RPN_MASK;

            lu->vaddr = tlb_tag;
            lu->paddr = tlb_rpn & cpu->cfg.addr_mask;
            lu->size = tlb_size;
            lu->err = ERR_HIT;
            lu->idx = i;
            hit = 1;
            goto done;
        }
    }
done:
    qemu_log_mask(CPU_LOG_MMU,
                  "MMU vaddr=%" PRIx64 " rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
                  (uint64_t)vaddr, rw, tlb_wr, tlb_ex, hit);
    return hit;
}

/* Writes/reads to the MMU's special regs end up here.  */
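/* ext selects which 32-bit half of the 64-bit TLBLO ram word is accessed;
   extended accesses are only allowed to TLBLO.  */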
uint32_t mmu_read(CPUMBState *env, bool ext, uint32_t rn)
{
    MicroBlazeCPU *cpu = env_archcpu(env);
    unsigned int i;
    uint32_t r = 0;

    if (cpu->cfg.mmu < 2 || !cpu->cfg.mmu_tlb_access) {
        qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n");
        return 0;
    }
    if (ext && rn != MMU_R_TLBLO) {
        qemu_log_mask(LOG_GUEST_ERROR, "Extended access only to TLBLO.\n");
        return 0;
    }

    switch (rn) {
        /* Reads to HI/LO trigger reads from the mmu rams.  */
        case MMU_R_TLBLO:
        case MMU_R_TLBHI:
            if (!(cpu->cfg.mmu_tlb_access & 1)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Invalid access to MMU reg %d\n", rn);
                return 0;
            }

            i = env->mmu.regs[MMU_R_TLBX] & 0xff;
            r = extract64(env->mmu.rams[rn & 1][i], ext * 32, 32);
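            /* Reading TLBHI has the side effect of copying the entry's
               TID into the PID register.  */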
            if (rn == MMU_R_TLBHI)
                env->mmu.regs[MMU_R_PID] = env->mmu.tids[i];
            break;
        case MMU_R_PID:
        case MMU_R_ZPR:
            if (!(cpu->cfg.mmu_tlb_access & 1)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Invalid access to MMU reg %d\n", rn);
                return 0;
            }
            r = env->mmu.regs[rn];
            break;
        case MMU_R_TLBX:
            r = env->mmu.regs[rn];
            break;
        case MMU_R_TLBSX:
            qemu_log_mask(LOG_GUEST_ERROR, "TLBSX is write-only.\n");
            break;
        default:
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid MMU register %d.\n", rn);
            break;
    }
    qemu_log_mask(CPU_LOG_MMU, "%s rn=%d=%x\n", __func__, rn, r);
    return r;
}

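/* Write v into MMU special register rn.  TLBLO/TLBHI writes update the TLB
   rams, TLBSX performs a lookup, and PID/ZPR changes flush any affected
   QEMU TLB entries.  */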
void mmu_write(CPUMBState *env, bool ext, uint32_t rn, uint32_t v)
{
    MicroBlazeCPU *cpu = env_archcpu(env);
    uint64_t tmp64;
    unsigned int i;

    qemu_log_mask(CPU_LOG_MMU,
                  "%s rn=%d=%x old=%x\n", __func__, rn, v,
                  rn < 3 ? env->mmu.regs[rn] : env->mmu.regs[MMU_R_TLBX]);

    if (cpu->cfg.mmu < 2 || !cpu->cfg.mmu_tlb_access) {
        qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n");
        return;
    }
    if (ext && rn != MMU_R_TLBLO) {
        qemu_log_mask(LOG_GUEST_ERROR, "Extended access only to TLBLO.\n");
        return;
    }

    switch (rn) {
        /* Writes to HI/LO trigger writes to the mmu rams.  */
        case MMU_R_TLBLO:
        case MMU_R_TLBHI:
            i = env->mmu.regs[MMU_R_TLBX] & 0xff;
            if (rn == MMU_R_TLBHI) {
                if (i < 3 && !(v & TLB_VALID) && qemu_loglevel_mask(~0))
                    qemu_log_mask(LOG_GUEST_ERROR,
                                  "invalidating index %x at pc=%x\n",
                                  i, env->pc);
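                /* A TLBHI write tags the entry with the current PID and
                   flushes any mappings cached for its old contents.  */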
                env->mmu.tids[i] = env->mmu.regs[MMU_R_PID] & 0xff;
                mmu_flush_idx(env, i);
            }
            tmp64 = env->mmu.rams[rn & 1][i];
            env->mmu.rams[rn & 1][i] = deposit64(tmp64, ext * 32, 32, v);
            break;
        case MMU_R_ZPR:
            if (cpu->cfg.mmu_tlb_access <= 1) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Invalid access to MMU reg %d\n", rn);
                return;
            }

            /* Changes to the zone protection reg flush the QEMU TLB.
               Fortunately, these are very uncommon.  */
            if (v != env->mmu.regs[rn]) {
                tlb_flush(env_cpu(env));
            }
            env->mmu.regs[rn] = v;
            break;
        case MMU_R_PID:
            if (cpu->cfg.mmu_tlb_access <= 1) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Invalid access to MMU reg %d\n", rn);
                return;
            }

            if (v != env->mmu.regs[rn]) {
                mmu_change_pid(env, v);
                env->mmu.regs[rn] = v;
            }
            break;
        case MMU_R_TLBX:
            /* Bit 31 is read-only.  */
            env->mmu.regs[rn] = deposit32(env->mmu.regs[rn], 0, 31, v);
            break;
        case MMU_R_TLBSX:
        {
            MicroBlazeMMULookup lu;
            int hit;

            if (cpu->cfg.mmu_tlb_access <= 1) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Invalid access to MMU reg %d\n", rn);
                return;
            }

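            /* Search the UTLB for an entry matching the EPN in v.  On a
               hit, TLBX is loaded with the matching index; on a miss,
               the MISS bit is set.  */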
            hit = mmu_translate(cpu, &lu, v & TLB_EPN_MASK,
                                0, cpu_mmu_index(env, false));
            if (hit) {
                env->mmu.regs[MMU_R_TLBX] = lu.idx;
            } else {
                env->mmu.regs[MMU_R_TLBX] |= R_TBLX_MISS_MASK;
            }
            break;
        }
        default:
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid MMU register %d.\n", rn);
            break;
    }
}

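/* Clear the MMU special registers.  */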
void mmu_init(MicroBlazeMMU *mmu)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(mmu->regs); i++) {
        mmu->regs[i] = 0;
    }
}
