/* qemu/target/microblaze/mmu.c */
/*
 *  Microblaze MMU emulation for qemu.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"

#define D(x)
  27static unsigned int tlb_decode_size(unsigned int f)
  28{
  29    static const unsigned int sizes[] = {
  30        1 * 1024, 4 * 1024, 16 * 1024, 64 * 1024, 256 * 1024,
  31        1 * 1024 * 1024, 4 * 1024 * 1024, 16 * 1024 * 1024
  32    };
  33    assert(f < ARRAY_SIZE(sizes));
  34    return sizes[f];
  35}
  36
  37static void mmu_flush_idx(CPUMBState *env, unsigned int idx)
  38{
  39    CPUState *cs = CPU(mb_env_get_cpu(env));
  40    struct microblaze_mmu *mmu = &env->mmu;
  41    unsigned int tlb_size;
  42    uint32_t tlb_tag, end, t;
  43
  44    t = mmu->rams[RAM_TAG][idx];
  45    if (!(t & TLB_VALID))
  46        return;
  47
  48    tlb_tag = t & TLB_EPN_MASK;
  49    tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
  50    end = tlb_tag + tlb_size;
  51
  52    while (tlb_tag < end) {
  53        tlb_flush_page(cs, tlb_tag);
  54        tlb_tag += TARGET_PAGE_SIZE;
  55    }
  56}
  57
  58static void mmu_change_pid(CPUMBState *env, unsigned int newpid) 
  59{
  60    struct microblaze_mmu *mmu = &env->mmu;
  61    unsigned int i;
  62    uint32_t t;
  63
  64    if (newpid & ~0xff)
  65        qemu_log_mask(LOG_GUEST_ERROR, "Illegal rpid=%x\n", newpid);
  66
  67    for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
  68        /* Lookup and decode.  */
  69        t = mmu->rams[RAM_TAG][i];
  70        if (t & TLB_VALID) {
  71            if (mmu->tids[i] && ((mmu->regs[MMU_R_PID] & 0xff) == mmu->tids[i]))
  72                mmu_flush_idx(env, i);
  73        }
  74    }
  75}
  76
  77/* rw - 0 = read, 1 = write, 2 = fetch.  */
  78unsigned int mmu_translate(struct microblaze_mmu *mmu,
  79                           struct microblaze_mmu_lookup *lu,
  80                           target_ulong vaddr, int rw, int mmu_idx)
  81{
  82    unsigned int i, hit = 0;
  83    unsigned int tlb_ex = 0, tlb_wr = 0, tlb_zsel;
  84    uint64_t tlb_tag, tlb_rpn, mask;
  85    uint32_t tlb_size, t0;
  86
  87    lu->err = ERR_MISS;
  88    for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
  89        uint64_t t, d;
  90
  91        /* Lookup and decode.  */
  92        t = mmu->rams[RAM_TAG][i];
  93        D(qemu_log("TLB %" PRId64 " valid=%d\n", i, t & TLB_VALID));
  94        if (t & TLB_VALID) {
  95            tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
  96            if (tlb_size < TARGET_PAGE_SIZE) {
  97                qemu_log("%d pages not supported\n", tlb_size);
  98                abort();
  99            }
 100
 101            mask = ~((uint64_t)tlb_size - 1);
 102            tlb_tag = t & TLB_EPN_MASK;
 103            if ((vaddr & mask) != (tlb_tag & mask)) {
 104                D(qemu_log("TLB %d vaddr=%x != tag=%x\n",
 105                           i, vaddr & mask, tlb_tag & mask));
 106                continue;
 107            }
 108            if (mmu->tids[i]
 109                && ((mmu->regs[MMU_R_PID] & 0xff) != mmu->tids[i])) {
 110                D(qemu_log("TLB %d pid=%x != tid=%x\n",
 111                           i, mmu->regs[MMU_R_PID], mmu->tids[i]));
 112                continue;
 113            }
 114
 115            /* Bring in the data part.  */
 116            d = mmu->rams[RAM_DATA][i];
 117            tlb_ex = d & TLB_EX;
 118            tlb_wr = d & TLB_WR;
 119
 120            /* Now let's see if there is a zone that overrides the protbits.  */
 121            tlb_zsel = (d >> 4) & 0xf;
 122            t0 = mmu->regs[MMU_R_ZPR] >> (30 - (tlb_zsel * 2));
 123            t0 &= 0x3;
 124
 125            if (tlb_zsel > mmu->c_mmu_zones) {
 126                qemu_log_mask(LOG_GUEST_ERROR, "tlb zone select out of range! %d\n", tlb_zsel);
 127                t0 = 1; /* Ignore.  */
 128            }
 129
 130            if (mmu->c_mmu == 1) {
 131                t0 = 1; /* Zones are disabled.  */
 132            }
 133
 134            switch (t0) {
 135                case 0:
 136                    if (mmu_idx == MMU_USER_IDX)
 137                        continue;
 138                    break;
 139                case 2:
 140                    if (mmu_idx != MMU_USER_IDX) {
 141                        tlb_ex = 1;
 142                        tlb_wr = 1;
 143                    }
 144                    break;
 145                case 3:
 146                    tlb_ex = 1;
 147                    tlb_wr = 1;
 148                    break;
 149                default: break;
 150            }
 151
 152            lu->err = ERR_PROT;
 153            lu->prot = PAGE_READ;
 154            if (tlb_wr)
 155                lu->prot |= PAGE_WRITE;
 156            else if (rw == 1)
 157                goto done;
 158            if (tlb_ex)
 159                lu->prot |=PAGE_EXEC;
 160            else if (rw == 2) {
 161                goto done;
 162            }
 163
 164            tlb_rpn = d & TLB_RPN_MASK;
 165
 166            lu->vaddr = tlb_tag;
 167            lu->paddr = tlb_rpn & mmu->c_addr_mask;
 168            lu->paddr = tlb_rpn;
 169            lu->size = tlb_size;
 170            lu->err = ERR_HIT;
 171            lu->idx = i;
 172            hit = 1;
 173            goto done;
 174        }
 175    }
 176done:
 177    D(qemu_log("MMU vaddr=%x rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
 178              vaddr, rw, tlb_wr, tlb_ex, hit));
 179    return hit;
 180}
 181
/* Writes/reads to the MMU's special regs end up here.  */
uint32_t mmu_read(CPUMBState *env, bool ext, uint32_t rn)
{
    unsigned int i;
    uint32_t r = 0;

    /* No MMU configured, or software TLB access disabled: ignore.  */
    if (env->mmu.c_mmu < 2 || !env->mmu.c_mmu_tlb_access) {
        qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n");
        return 0;
    }
    /* The 64-bit "extended" access form is only defined for TLBLO.  */
    if (ext && rn != MMU_R_TLBLO) {
        qemu_log_mask(LOG_GUEST_ERROR, "Extended access only to TLBLO.\n");
        return 0;
    }

    switch (rn) {
        /* Reads to HI/LO trig reads from the mmu rams.  */
        case MMU_R_TLBLO:
        case MMU_R_TLBHI:
            if (!(env->mmu.c_mmu_tlb_access & 1)) {
                qemu_log_mask(LOG_GUEST_ERROR, "Invalid access to MMU reg %d\n", rn);
                return 0;
            }

            /* TLBX selects the entry; ext picks the high or low 32 bits
               of the 64-bit RAM word.  */
            i = env->mmu.regs[MMU_R_TLBX] & 0xff;
            r = extract64(env->mmu.rams[rn & 1][i], ext * 32, 32);
            /* Reading TLBHI has the architectural side effect of loading
               PID with the selected entry's TID.  */
            if (rn == MMU_R_TLBHI)
                env->mmu.regs[MMU_R_PID] = env->mmu.tids[i];
            break;
        case MMU_R_PID:
        case MMU_R_ZPR:
            if (!(env->mmu.c_mmu_tlb_access & 1)) {
                qemu_log_mask(LOG_GUEST_ERROR, "Invalid access to MMU reg %d\n", rn);
                return 0;
            }
            r = env->mmu.regs[rn];
            break;
        case MMU_R_TLBX:
            /* TLBX (including the MISS flag in bit 31) is always readable.  */
            r = env->mmu.regs[rn];
            break;
        case MMU_R_TLBSX:
            qemu_log_mask(LOG_GUEST_ERROR, "TLBSX is write-only.\n");
            break;
        default:
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid MMU register %d.\n", rn);
            break;
    }
    D(qemu_log("%s rn=%d=%x\n", __func__, rn, r));
    return r;
}
 232
/* Handle a guest write to one of the MMU's special registers.
 * v is the 32-bit value written; for TLBLO, ext selects whether it goes
 * into the high (ext=1) or low (ext=0) half of the 64-bit RAM word.
 */
void mmu_write(CPUMBState *env, bool ext, uint32_t rn, uint32_t v)
{
    MicroBlazeCPU *cpu = mb_env_get_cpu(env);
    uint64_t tmp64;
    unsigned int i;
    D(qemu_log("%s rn=%d=%x old=%x\n", __func__, rn, v, env->mmu.regs[rn]));

    /* No MMU configured, or software TLB access disabled: ignore.  */
    if (env->mmu.c_mmu < 2 || !env->mmu.c_mmu_tlb_access) {
        qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n");
        return;
    }
    /* The 64-bit "extended" access form is only defined for TLBLO.  */
    if (ext && rn != MMU_R_TLBLO) {
        qemu_log_mask(LOG_GUEST_ERROR, "Extended access only to TLBLO.\n");
        return;
    }

    switch (rn) {
        /* Writes to HI/LO trig writes to the mmu rams.  */
        case MMU_R_TLBLO:
        case MMU_R_TLBHI:
            i = env->mmu.regs[MMU_R_TLBX] & 0xff;
            if (rn == MMU_R_TLBHI) {
                /* NOTE(review): qemu_loglevel_mask(~0) is true whenever any
                   logging is enabled; presumably a debug aid for the low
                   (boot-critical) TLB indexes — confirm intent.  */
                if (i < 3 && !(v & TLB_VALID) && qemu_loglevel_mask(~0))
                    qemu_log_mask(LOG_GUEST_ERROR,
                             "invalidating index %x at pc=%" PRIx64 "\n",
                             i, env->sregs[SR_PC]);
                /* Writing TLBHI latches the current PID as the entry's TID
                   and flushes QEMU's cached translations for the entry
                   being replaced.  */
                env->mmu.tids[i] = env->mmu.regs[MMU_R_PID] & 0xff;
                mmu_flush_idx(env, i);
            }
            /* Merge the 32-bit write into the selected half of the
               64-bit TLB RAM word.  */
            tmp64 = env->mmu.rams[rn & 1][i];
            env->mmu.rams[rn & 1][i] = deposit64(tmp64, ext * 32, 32, v);

            D(qemu_log("%s ram[%d][%d]=%x\n", __func__, rn & 1, i, v));
            break;
        case MMU_R_ZPR:
            if (env->mmu.c_mmu_tlb_access <= 1) {
                qemu_log_mask(LOG_GUEST_ERROR, "Invalid access to MMU reg %d\n", rn);
                return;
            }

            /* Changes to the zone protection reg flush the QEMU TLB.
               Fortunately, these are very uncommon.  */
            if (v != env->mmu.regs[rn]) {
                tlb_flush(CPU(cpu));
            }
            env->mmu.regs[rn] = v;
            break;
        case MMU_R_PID:
            if (env->mmu.c_mmu_tlb_access <= 1) {
                qemu_log_mask(LOG_GUEST_ERROR, "Invalid access to MMU reg %d\n", rn);
                return;
            }

            /* Flush translations for the old PID before switching.  */
            if (v != env->mmu.regs[rn]) {
                mmu_change_pid(env, v);
                env->mmu.regs[rn] = v;
            }
            break;
        case MMU_R_TLBX:
            /* Bit 31 is read-only.  */
            env->mmu.regs[rn] = deposit32(env->mmu.regs[rn], 0, 31, v);
            break;
        case MMU_R_TLBSX:
        {
            struct microblaze_mmu_lookup lu;
            int hit;

            if (env->mmu.c_mmu_tlb_access <= 1) {
                qemu_log_mask(LOG_GUEST_ERROR, "Invalid access to MMU reg %d\n", rn);
                return;
            }

            /* Search the TLB for v's page; load TLBX with the matching
               index on a hit, or set the MISS flag on failure.  */
            hit = mmu_translate(&env->mmu, &lu,
                                v & TLB_EPN_MASK, 0, cpu_mmu_index(env, false));
            if (hit) {
                env->mmu.regs[MMU_R_TLBX] = lu.idx;
            } else {
                env->mmu.regs[MMU_R_TLBX] |= R_TBLX_MISS_MASK;
            }
            break;
        }
        default:
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid MMU register %d.\n", rn);
            break;
   }
}
 319
 320void mmu_init(struct microblaze_mmu *mmu)
 321{
 322    int i;
 323    for (i = 0; i < ARRAY_SIZE(mmu->regs); i++) {
 324        mmu->regs[i] = 0;
 325    }
 326}
 327