/* qemu/target/microblaze/mmu.c */
   1/*
   2 *  Microblaze MMU emulation for qemu.
   3 *
   4 *  Copyright (c) 2009 Edgar E. Iglesias
   5 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
   6 *
   7 * This library is free software; you can redistribute it and/or
   8 * modify it under the terms of the GNU Lesser General Public
   9 * License as published by the Free Software Foundation; either
  10 * version 2 of the License, or (at your option) any later version.
  11 *
  12 * This library is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  15 * Lesser General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU Lesser General Public
  18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  19 */
  20
  21#include "qemu/osdep.h"
  22#include "cpu.h"
  23#include "exec/exec-all.h"
  24
  25static unsigned int tlb_decode_size(unsigned int f)
  26{
  27    static const unsigned int sizes[] = {
  28        1 * 1024, 4 * 1024, 16 * 1024, 64 * 1024, 256 * 1024,
  29        1 * 1024 * 1024, 4 * 1024 * 1024, 16 * 1024 * 1024
  30    };
  31    assert(f < ARRAY_SIZE(sizes));
  32    return sizes[f];
  33}
  34
  35static void mmu_flush_idx(CPUMBState *env, unsigned int idx)
  36{
  37    CPUState *cs = env_cpu(env);
  38    struct microblaze_mmu *mmu = &env->mmu;
  39    unsigned int tlb_size;
  40    uint32_t tlb_tag, end, t;
  41
  42    t = mmu->rams[RAM_TAG][idx];
  43    if (!(t & TLB_VALID))
  44        return;
  45
  46    tlb_tag = t & TLB_EPN_MASK;
  47    tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
  48    end = tlb_tag + tlb_size;
  49
  50    while (tlb_tag < end) {
  51        tlb_flush_page(cs, tlb_tag);
  52        tlb_tag += TARGET_PAGE_SIZE;
  53    }
  54}
  55
  56static void mmu_change_pid(CPUMBState *env, unsigned int newpid) 
  57{
  58    struct microblaze_mmu *mmu = &env->mmu;
  59    unsigned int i;
  60    uint32_t t;
  61
  62    if (newpid & ~0xff)
  63        qemu_log_mask(LOG_GUEST_ERROR, "Illegal rpid=%x\n", newpid);
  64
  65    for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
  66        /* Lookup and decode.  */
  67        t = mmu->rams[RAM_TAG][i];
  68        if (t & TLB_VALID) {
  69            if (mmu->tids[i] && ((mmu->regs[MMU_R_PID] & 0xff) == mmu->tids[i]))
  70                mmu_flush_idx(env, i);
  71        }
  72    }
  73}
  74
/*
 * Look up @vaddr in the guest-visible TLB.
 *
 * rw - 0 = read, 1 = write, 2 = fetch.
 *
 * On a hit, fills in @lu (matching virtual tag, masked physical address,
 * page size, protection bits and TLB index) and returns 1.  Returns 0 on
 * a miss (lu->err == ERR_MISS) or when the matching entry denies the
 * requested access (lu->err == ERR_PROT).
 */
unsigned int mmu_translate(struct microblaze_mmu *mmu,
                           struct microblaze_mmu_lookup *lu,
                           target_ulong vaddr, int rw, int mmu_idx)
{
    unsigned int i, hit = 0;
    unsigned int tlb_ex = 0, tlb_wr = 0, tlb_zsel;
    uint64_t tlb_tag, tlb_rpn, mask;
    uint32_t tlb_size, t0;

    lu->err = ERR_MISS;
    for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
        uint64_t t, d;

        /* Lookup and decode.  */
        t = mmu->rams[RAM_TAG][i];
        if (t & TLB_VALID) {
            tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
            if (tlb_size < TARGET_PAGE_SIZE) {
                /* Guest pages smaller than QEMU's target page size are
                   not implemented.  */
                qemu_log_mask(LOG_UNIMP, "%d pages not supported\n", tlb_size);
                abort();
            }

            /* Compare @vaddr against the entry's tag, masked to the
               entry's (power-of-two) page size.  */
            mask = ~((uint64_t)tlb_size - 1);
            tlb_tag = t & TLB_EPN_MASK;
            if ((vaddr & mask) != (tlb_tag & mask)) {
                continue;
            }
            /* tid == 0 marks a global entry; otherwise the entry only
               matches the current PID.  */
            if (mmu->tids[i]
                && ((mmu->regs[MMU_R_PID] & 0xff) != mmu->tids[i])) {
                continue;
            }

            /* Bring in the data part.  */
            d = mmu->rams[RAM_DATA][i];
            tlb_ex = d & TLB_EX;
            tlb_wr = d & TLB_WR;

            /* Now let's see if there is a zone that overrides the protbits.  */
            tlb_zsel = (d >> 4) & 0xf;
            /* Extract the 2-bit zone protection field for this zone from
               ZPR (zone 0 occupies the top two bits).  */
            t0 = mmu->regs[MMU_R_ZPR] >> (30 - (tlb_zsel * 2));
            t0 &= 0x3;

            if (tlb_zsel > mmu->c_mmu_zones) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "tlb zone select out of range! %d\n", tlb_zsel);
                t0 = 1; /* Ignore.  */
            }

            if (mmu->c_mmu == 1) {
                t0 = 1; /* Zones are disabled.  */
            }

            switch (t0) {
                case 0:
                    /* User mode gets no access at all; treat as a miss.  */
                    if (mmu_idx == MMU_USER_IDX)
                        continue;
                    break;
                case 2:
                    /* Kernel mode gets full access; user mode keeps the
                       entry's own protection bits.  */
                    if (mmu_idx != MMU_USER_IDX) {
                        tlb_ex = 1;
                        tlb_wr = 1;
                    }
                    break;
                case 3:
                    /* Full access for everyone.  */
                    tlb_ex = 1;
                    tlb_wr = 1;
                    break;
                default: break;
            }

            /* The tag matched; any bail-out below is a protection fault,
               not a miss.  */
            lu->err = ERR_PROT;
            lu->prot = PAGE_READ;
            if (tlb_wr)
                lu->prot |= PAGE_WRITE;
            else if (rw == 1)
                goto done;
            if (tlb_ex)
                lu->prot |=PAGE_EXEC;
            else if (rw == 2) {
                goto done;
            }

            tlb_rpn = d & TLB_RPN_MASK;

            lu->vaddr = tlb_tag;
            lu->paddr = tlb_rpn & mmu->c_addr_mask;
            lu->size = tlb_size;
            lu->err = ERR_HIT;
            lu->idx = i;
            hit = 1;
            goto done;
        }
    }
done:
    qemu_log_mask(CPU_LOG_MMU,
                  "MMU vaddr=%" PRIx64 " rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
                  vaddr, rw, tlb_wr, tlb_ex, hit);
    return hit;
}
 175
/* Writes/reads to the MMU's special regs end up here.  */
/*
 * Read MMU special register @rn.  @ext selects the upper 32 bits of the
 * 64-bit TLBLO ram word (extended access); only TLBLO supports it.
 * Returns 0 on any invalid access.
 */
uint32_t mmu_read(CPUMBState *env, bool ext, uint32_t rn)
{
    unsigned int i;
    uint32_t r = 0;

    /* Reject accesses when the configured MMU has no TLB access.  */
    if (env->mmu.c_mmu < 2 || !env->mmu.c_mmu_tlb_access) {
        qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n");
        return 0;
    }
    if (ext && rn != MMU_R_TLBLO) {
        qemu_log_mask(LOG_GUEST_ERROR, "Extended access only to TLBLO.\n");
        return 0;
    }

    switch (rn) {
        /* Reads to HI/LO trig reads from the mmu rams.  */
        case MMU_R_TLBLO:
        case MMU_R_TLBHI:
            if (!(env->mmu.c_mmu_tlb_access & 1)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Invalid access to MMU reg %d\n", rn);
                return 0;
            }

            /* TLBX holds the index of the entry being accessed.  */
            i = env->mmu.regs[MMU_R_TLBX] & 0xff;
            r = extract64(env->mmu.rams[rn & 1][i], ext * 32, 32);
            /* Reading TLBHI also loads the entry's TID into PID.  */
            if (rn == MMU_R_TLBHI)
                env->mmu.regs[MMU_R_PID] = env->mmu.tids[i];
            break;
        case MMU_R_PID:
        case MMU_R_ZPR:
            if (!(env->mmu.c_mmu_tlb_access & 1)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Invalid access to MMU reg %d\n", rn);
                return 0;
            }
            r = env->mmu.regs[rn];
            break;
        case MMU_R_TLBX:
            r = env->mmu.regs[rn];
            break;
        case MMU_R_TLBSX:
            qemu_log_mask(LOG_GUEST_ERROR, "TLBSX is write-only.\n");
            break;
        default:
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid MMU register %d.\n", rn);
            break;
    }
    qemu_log_mask(CPU_LOG_MMU, "%s rn=%d=%x\n", __func__, rn, r);
    return r;
}
 228
/*
 * Write @v to MMU special register @rn.  @ext selects the upper 32 bits
 * of the 64-bit TLBLO ram word; only TLBLO supports extended access.
 * Writes that change translations flush the corresponding QEMU TLB state.
 */
void mmu_write(CPUMBState *env, bool ext, uint32_t rn, uint32_t v)
{
    uint64_t tmp64;
    unsigned int i;
    qemu_log_mask(CPU_LOG_MMU,
                  "%s rn=%d=%x old=%x\n", __func__, rn, v, env->mmu.regs[rn]);

    /* Reject accesses when the configured MMU has no TLB access.  */
    if (env->mmu.c_mmu < 2 || !env->mmu.c_mmu_tlb_access) {
        qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n");
        return;
    }
    if (ext && rn != MMU_R_TLBLO) {
        qemu_log_mask(LOG_GUEST_ERROR, "Extended access only to TLBLO.\n");
        return;
    }

    switch (rn) {
        /* Writes to HI/LO trig writes to the mmu rams.  */
        case MMU_R_TLBLO:
        case MMU_R_TLBHI:
            /* TLBX holds the index of the entry being written.  */
            i = env->mmu.regs[MMU_R_TLBX] & 0xff;
            if (rn == MMU_R_TLBHI) {
                if (i < 3 && !(v & TLB_VALID) && qemu_loglevel_mask(~0))
                    qemu_log_mask(LOG_GUEST_ERROR,
                             "invalidating index %x at pc=%" PRIx64 "\n",
                             i, env->sregs[SR_PC]);
                /* Writing TLBHI tags the entry with the current PID and
                   drops any cached translations for the old contents.  */
                env->mmu.tids[i] = env->mmu.regs[MMU_R_PID] & 0xff;
                mmu_flush_idx(env, i);
            }
            tmp64 = env->mmu.rams[rn & 1][i];
            env->mmu.rams[rn & 1][i] = deposit64(tmp64, ext * 32, 32, v);
            break;
        case MMU_R_ZPR:
            if (env->mmu.c_mmu_tlb_access <= 1) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Invalid access to MMU reg %d\n", rn);
                return;
            }

            /* Changes to the zone protection reg flush the QEMU TLB.
               Fortunately, these are very uncommon.  */
            if (v != env->mmu.regs[rn]) {
                tlb_flush(env_cpu(env));
            }
            env->mmu.regs[rn] = v;
            break;
        case MMU_R_PID:
            if (env->mmu.c_mmu_tlb_access <= 1) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Invalid access to MMU reg %d\n", rn);
                return;
            }

            /* Flush entries belonging to the outgoing PID before the
               register actually changes.  */
            if (v != env->mmu.regs[rn]) {
                mmu_change_pid(env, v);
                env->mmu.regs[rn] = v;
            }
            break;
        case MMU_R_TLBX:
            /* Bit 31 is read-only.  */
            env->mmu.regs[rn] = deposit32(env->mmu.regs[rn], 0, 31, v);
            break;
        case MMU_R_TLBSX:
        {
            /* TLBSX: search the TLB for the EPN in @v; on a hit TLBX gets
               the matching index, otherwise the miss bit is set.  */
            struct microblaze_mmu_lookup lu;
            int hit;

            if (env->mmu.c_mmu_tlb_access <= 1) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Invalid access to MMU reg %d\n", rn);
                return;
            }

            hit = mmu_translate(&env->mmu, &lu,
                                v & TLB_EPN_MASK, 0, cpu_mmu_index(env, false));
            if (hit) {
                env->mmu.regs[MMU_R_TLBX] = lu.idx;
            } else {
                env->mmu.regs[MMU_R_TLBX] |= R_TBLX_MISS_MASK;
            }
            break;
        }
        default:
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid MMU register %d.\n", rn);
            break;
   }
}
 316
 317void mmu_init(struct microblaze_mmu *mmu)
 318{
 319    int i;
 320    for (i = 0; i < ARRAY_SIZE(mmu->regs); i++) {
 321        mmu->regs[i] = 0;
 322    }
 323}
 324