qemu/target/mips/tcg/sysemu/tlb_helper.c
/*
 * MIPS TLB (Translation lookaside buffer) helpers.
 *
 *  Copyright (c) 2004-2005 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"

#include "cpu.h"
#include "internal.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "hw/mips/cpudevs.h"
#include "exec/helper-proto.h"

/* TLB management */
static void r4k_mips_tlb_flush_extra(CPUMIPSState *env, int first)
{
    /* Discard entries from env->tlb[first] onwards.  */
    while (env->tlb->tlb_in_use > first) {
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
    }
}

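/*
 * EntryLo holds the PFN starting at bit 6 (bits 0..5 are G/V/D/C). On
 * 32-bit targets with XPA, the extra PFNX bits live in the upper word and
 * are concatenated above the 24-bit PFN. Illustrative example:
 * PFN = 0x00abcd with PFNX = 0x1 yields frame number 0x100abcd.
 */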
static inline uint64_t get_tlb_pfn_from_entrylo(uint64_t entrylo)
{
#if defined(TARGET_MIPS64)
    return extract64(entrylo, 6, 54);
#else
    return extract64(entrylo, 6, 24) | /* PFN */
           (extract64(entrylo, 32, 32) << 24); /* PFNX */
#endif
}

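/*
 * The mask derived from CP0.PageMask clears PFN bits that fall inside a
 * page of the configured size. Illustrative example: a 16 KiB PageMask
 * (0x6000) gives mask = 0x3, so the two low PFN bits are dropped before
 * the PFN is shifted back into a byte address (<< 12).
 */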
static void r4k_fill_tlb(CPUMIPSState *env, int idx)
{
    r4k_tlb_t *tlb;
    uint64_t mask = env->CP0_PageMask >> (TARGET_PAGE_BITS + 1);

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    if (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) {
        tlb->EHINV = 1;
        return;
    }
    tlb->EHINV = 0;
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    tlb->MMID = env->CP0_MemoryMapID;
    tlb->PageMask = env->CP0_PageMask;
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
    tlb->RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
    tlb->PFN[0] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo0) & ~mask) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
    tlb->RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;
    tlb->PFN[1] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo1) & ~mask) << 12;
}

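/*
 * With Config5.MI set, entries are tagged with the 32-bit MemoryMapID
 * (MMID) rather than the EntryHi ASID; the helpers below widen the ASID
 * to 32 bits so both schemes compare through the same variable.
 */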
static void r4k_helper_tlbinv(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    r4k_tlb_t *tlb;
    int idx;

    MMID = mi ? MMID : (uint32_t) ASID;
    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        tlb = &env->tlb->mmu.r4k.tlb[idx];
        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
        if (!tlb->G && tlb_mmid == MMID) {
            tlb->EHINV = 1;
        }
    }
    cpu_mips_tlb_flush(env);
}

static void r4k_helper_tlbinvf(CPUMIPSState *env)
{
    int idx;

    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        env->tlb->mmu.r4k.tlb[idx].EHINV = 1;
    }
    cpu_mips_tlb_flush(env);
}

static void r4k_helper_tlbwi(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    target_ulong VPN;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    bool EHINV, G, V0, D0, V1, D1, XI0, XI1, RI0, RI1;
    r4k_tlb_t *tlb;
    int idx;

    MMID = mi ? MMID : (uint32_t) ASID;

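    /*
     * Bit 31 of CP0.Index is the probe-failure (P) flag; mask it off and
     * wrap the index so a stale probe result cannot select an entry
     * outside the TLB array.
     */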
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    VPN &= env->SEGMask;
#endif
    EHINV = (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) != 0;
    G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    V0 = (env->CP0_EntryLo0 & 2) != 0;
    D0 = (env->CP0_EntryLo0 & 4) != 0;
    XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
    RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
    V1 = (env->CP0_EntryLo1 & 2) != 0;
    D1 = (env->CP0_EntryLo1 & 4) != 0;
    XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
    RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;

    tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
    /*
     * Discard cached TLB entries, unless tlbwi is just upgrading access
     * permissions on the current entry.
     */
    if (tlb->VPN != VPN || tlb_mmid != MMID || tlb->G != G ||
        (!tlb->EHINV && EHINV) ||
        (tlb->V0 && !V0) || (tlb->D0 && !D0) ||
        (!tlb->XI0 && XI0) || (!tlb->RI0 && RI0) ||
        (tlb->V1 && !V1) || (tlb->D1 && !D1) ||
        (!tlb->XI1 && XI1) || (!tlb->RI1 && RI1)) {
        r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
    }

    r4k_invalidate_tlb(env, idx, 0);
    r4k_fill_tlb(env, idx);
}

static void r4k_helper_tlbwr(CPUMIPSState *env)
{
    int r = cpu_mips_get_random(env);

    r4k_invalidate_tlb(env, r, 1);
    r4k_fill_tlb(env, r);
}

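/*
 * TLB probe: on a hit, CP0.Index is set to the index of the matching
 * entry; on a miss, the P flag (bit 31 of CP0.Index) is set instead.
 */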
static void r4k_helper_tlbp(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    r4k_tlb_t *tlb;
    target_ulong mask;
    target_ulong tag;
    target_ulong VPN;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    int i;

    MMID = mi ? MMID : (uint32_t) ASID;
    for (i = 0; i < env->tlb->nb_tlb; i++) {
        tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        tag = env->CP0_EntryHi & ~mask;
        VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        tag &= env->SEGMask;
#endif
        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
        /* Check ASID/MMID, virtual page number & size */
        if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) {
            /* TLB match */
            env->CP0_Index = i;
            break;
        }
    }
    if (i == env->tlb->nb_tlb) {
        /* No match.  Discard any shadow entries that do match.  */
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
            tlb = &env->tlb->mmu.r4k.tlb[i];
            /* 1k pages are not supported. */
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
            tag = env->CP0_EntryHi & ~mask;
            VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
            tag &= env->SEGMask;
#endif
            tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
            /* Check ASID/MMID, virtual page number & size */
            if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag) {
                r4k_mips_tlb_flush_extra(env, i);
                break;
            }
        }

        env->CP0_Index |= 0x80000000;
    }
}

static inline uint64_t get_entrylo_pfn_from_tlb(uint64_t tlb_pfn)
{
#if defined(TARGET_MIPS64)
    return tlb_pfn << 6;
#else
    return (extract64(tlb_pfn, 0, 24) << 6) | /* PFN */
           (extract64(tlb_pfn, 24, 32) << 32); /* PFNX */
#endif
}

static void r4k_helper_tlbr(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    r4k_tlb_t *tlb;
    int idx;

    MMID = mi ? MMID : (uint32_t) ASID;
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];

    tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
    /* If this will change the current ASID/MMID, flush qemu's TLB.  */
    if (MMID != tlb_mmid) {
        cpu_mips_tlb_flush(env);
    }

    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    if (tlb->EHINV) {
        env->CP0_EntryHi = 1 << CP0EnHi_EHINV;
        env->CP0_PageMask = 0;
        env->CP0_EntryLo0 = 0;
        env->CP0_EntryLo1 = 0;
    } else {
        env->CP0_EntryHi = mi ? tlb->VPN : tlb->VPN | tlb->ASID;
        env->CP0_MemoryMapID = tlb->MMID;
        env->CP0_PageMask = tlb->PageMask;
        env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                        ((uint64_t)tlb->RI0 << CP0EnLo_RI) |
                        ((uint64_t)tlb->XI0 << CP0EnLo_XI) | (tlb->C0 << 3) |
                        get_entrylo_pfn_from_tlb(tlb->PFN[0] >> 12);
        env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                        ((uint64_t)tlb->RI1 << CP0EnLo_RI) |
                        ((uint64_t)tlb->XI1 << CP0EnLo_XI) | (tlb->C1 << 3) |
                        get_entrylo_pfn_from_tlb(tlb->PFN[1] >> 12);
    }
}

void helper_tlbwi(CPUMIPSState *env)
{
    env->tlb->helper_tlbwi(env);
}

void helper_tlbwr(CPUMIPSState *env)
{
    env->tlb->helper_tlbwr(env);
}

void helper_tlbp(CPUMIPSState *env)
{
    env->tlb->helper_tlbp(env);
}

void helper_tlbr(CPUMIPSState *env)
{
    env->tlb->helper_tlbr(env);
}

void helper_tlbinv(CPUMIPSState *env)
{
    env->tlb->helper_tlbinv(env);
}

void helper_tlbinvf(CPUMIPSState *env)
{
    env->tlb->helper_tlbinvf(env);
}

static void global_invalidate_tlb(CPUMIPSState *env,
                                  uint32_t invMsgVPN2,
                                  uint8_t invMsgR,
                                  uint32_t invMsgMMid,
                                  bool invAll,
                                  bool invVAMMid,
                                  bool invMMid,
                                  bool invVA)
{
    int idx;
    r4k_tlb_t *tlb;
    bool VAMatch;
    bool MMidMatch;

    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        tlb = &env->tlb->mmu.r4k.tlb[idx];
        VAMatch =
            (((tlb->VPN & ~tlb->PageMask) == (invMsgVPN2 & ~tlb->PageMask))
#ifdef TARGET_MIPS64
            &&
            (extract64(env->CP0_EntryHi, 62, 2) == invMsgR)
#endif
            );
        MMidMatch = tlb->MMID == invMsgMMid;
        if ((invAll && (idx > env->CP0_Wired)) ||
            (VAMatch && invVAMMid && (tlb->G || MMidMatch)) ||
            (VAMatch && invVA) ||
            (MMidMatch && !(tlb->G) && invMMid)) {
            tlb->EHINV = 1;
        }
    }
    cpu_mips_tlb_flush(env);
}

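/*
 * GINVT invalidation types as decoded below: 0 invalidates all non-wired
 * entries, 1 invalidates by VA, 2 by MMID, and 3 by VA and MMID combined.
 */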
void helper_ginvt(CPUMIPSState *env, target_ulong arg, uint32_t type)
{
    bool invAll = type == 0;
    bool invVA = type == 1;
    bool invMMid = type == 2;
    bool invVAMMid = type == 3;
    uint32_t invMsgVPN2 = arg & (TARGET_PAGE_MASK << 1);
    uint8_t invMsgR = 0;
    uint32_t invMsgMMid = env->CP0_MemoryMapID;
    CPUState *other_cs = first_cpu;

#ifdef TARGET_MIPS64
    invMsgR = extract64(arg, 62, 2);
#endif

    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);
        global_invalidate_tlb(&other_cpu->env, invMsgVPN2, invMsgR, invMsgMMid,
                              invAll, invVAMMid, invMMid, invVA);
    }
}

/* no MMU emulation */
static int no_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                              target_ulong address, MMUAccessType access_type)
{
    *physical = address;
    *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    return TLBRET_MATCH;
}

/* fixed mapping MMU emulation */
static int fixed_mmu_map_address(CPUMIPSState *env, hwaddr *physical,
                                 int *prot, target_ulong address,
                                 MMUAccessType access_type)
{
    if (address <= (int32_t)0x7FFFFFFFUL) {
        if (!(env->CP0_Status & (1 << CP0St_ERL))) {
            *physical = address + 0x40000000UL;
        } else {
            *physical = address;
        }
    } else if (address <= (int32_t)0xBFFFFFFFUL) {
        *physical = address & 0x1FFFFFFF;
    } else {
        *physical = address;
    }

    *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    return TLBRET_MATCH;
}

/* MIPS32/MIPS64 R4000-style MMU emulation */
static int r4k_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                           target_ulong address, MMUAccessType access_type)
{
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint32_t tlb_mmid;
    int i;

    MMID = mi ? MMID : (uint32_t) ASID;

    for (i = 0; i < env->tlb->tlb_in_use; i++) {
        r4k_tlb_t *tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        target_ulong mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        target_ulong tag = address & ~mask;
        target_ulong VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        tag &= env->SEGMask;
#endif

        /* Check ASID/MMID, virtual page number & size */
        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
        if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) {
            /* TLB match */
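            /*
             * mask covers the dual-page region and mask >> 1 a single
             * page, so their difference is the bit that selects the
             * even (n = 0) or odd (n = 1) page of the pair.
             */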
            int n = !!(address & mask & ~(mask >> 1));
            /* Check access rights */
            if (!(n ? tlb->V1 : tlb->V0)) {
                return TLBRET_INVALID;
            }
            if (access_type == MMU_INST_FETCH && (n ? tlb->XI1 : tlb->XI0)) {
                return TLBRET_XI;
            }
            if (access_type == MMU_DATA_LOAD && (n ? tlb->RI1 : tlb->RI0)) {
                return TLBRET_RI;
            }
            if (access_type != MMU_DATA_STORE || (n ? tlb->D1 : tlb->D0)) {
                *physical = tlb->PFN[n] | (address & (mask >> 1));
                *prot = PAGE_READ;
                if (n ? tlb->D1 : tlb->D0) {
                    *prot |= PAGE_WRITE;
                }
                if (!(n ? tlb->XI1 : tlb->XI0)) {
                    *prot |= PAGE_EXEC;
                }
                return TLBRET_MATCH;
            }
            return TLBRET_DIRTY;
        }
    }
    return TLBRET_NOMATCH;
}

static void no_mmu_init(CPUMIPSState *env, const mips_def_t *def)
{
    env->tlb->nb_tlb = 1;
    env->tlb->map_address = &no_mmu_map_address;
}

static void fixed_mmu_init(CPUMIPSState *env, const mips_def_t *def)
{
    env->tlb->nb_tlb = 1;
    env->tlb->map_address = &fixed_mmu_map_address;
}

static void r4k_mmu_init(CPUMIPSState *env, const mips_def_t *def)
{
    env->tlb->nb_tlb = 1 + ((def->CP0_Config1 >> CP0C1_MMU) & 63);
    env->tlb->map_address = &r4k_map_address;
    env->tlb->helper_tlbwi = r4k_helper_tlbwi;
    env->tlb->helper_tlbwr = r4k_helper_tlbwr;
    env->tlb->helper_tlbp = r4k_helper_tlbp;
    env->tlb->helper_tlbr = r4k_helper_tlbr;
    env->tlb->helper_tlbinv = r4k_helper_tlbinv;
    env->tlb->helper_tlbinvf = r4k_helper_tlbinvf;
}

void mmu_init(CPUMIPSState *env, const mips_def_t *def)
{
    env->tlb = g_malloc0(sizeof(CPUMIPSTLBContext));

    switch (def->mmu_type) {
    case MMU_TYPE_NONE:
        no_mmu_init(env, def);
        break;
    case MMU_TYPE_R4000:
        r4k_mmu_init(env, def);
        break;
    case MMU_TYPE_FMT:
        fixed_mmu_init(env, def);
        break;
    case MMU_TYPE_R3000:
    case MMU_TYPE_R6000:
    case MMU_TYPE_R8000:
    default:
        cpu_abort(env_cpu(env), "MMU type not supported\n");
    }
}

void cpu_mips_tlb_flush(CPUMIPSState *env)
{
    /* Flush qemu's TLB and discard all shadowed entries.  */
    tlb_flush(env_cpu(env));
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}

static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
                                MMUAccessType access_type, int tlb_error)
{
    CPUState *cs = env_cpu(env);
    int exception = 0, error_code = 0;

    if (access_type == MMU_INST_FETCH) {
        error_code |= EXCP_INST_NOTAVAIL;
    }

    switch (tlb_error) {
    default:
    case TLBRET_BADADDR:
        /* Reference to kernel address from user mode or supervisor mode */
        /* Reference to supervisor address from user mode */
        if (access_type == MMU_DATA_STORE) {
            exception = EXCP_AdES;
        } else {
            exception = EXCP_AdEL;
        }
        break;
    case TLBRET_NOMATCH:
        /* No TLB match for a mapped address */
        if (access_type == MMU_DATA_STORE) {
            exception = EXCP_TLBS;
        } else {
            exception = EXCP_TLBL;
        }
        error_code |= EXCP_TLB_NOMATCH;
        break;
    case TLBRET_INVALID:
        /* TLB match with no valid bit */
        if (access_type == MMU_DATA_STORE) {
            exception = EXCP_TLBS;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    case TLBRET_DIRTY:
        /* TLB match but 'D' bit is cleared */
        exception = EXCP_LTLBL;
        break;
    case TLBRET_XI:
        /* Execute-Inhibit Exception */
        if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
            exception = EXCP_TLBXI;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    case TLBRET_RI:
        /* Read-Inhibit Exception */
        if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
            exception = EXCP_TLBRI;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    }
    /* Raise exception */
    if (!(env->hflags & MIPS_HFLAG_DM)) {
        env->CP0_BadVAddr = address;
    }
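    /* Context.BadVPN2 gets VA[31:13], placed at bits 22..4 of CP0.Context. */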
    env->CP0_Context = (env->CP0_Context & ~0x007fffff) |
                       ((address >> 9) & 0x007ffff0);
    env->CP0_EntryHi = (env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask) |
                       (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) |
                       (address & (TARGET_PAGE_MASK << 1));
#if defined(TARGET_MIPS64)
    env->CP0_EntryHi &= env->SEGMask;
    env->CP0_XContext =
        (env->CP0_XContext & ((~0ULL) << (env->SEGBITS - 7))) | /* PTEBase */
        (extract64(address, 62, 2) << (env->SEGBITS - 9)) |     /* R       */
        (extract64(address, 13, env->SEGBITS - 13) << 4);       /* BadVPN2 */
#endif
    cs->exception_index = exception;
    env->error_code = error_code;
}

#if !defined(TARGET_MIPS64)

/*
 * Perform hardware page table walk
 *
 * Memory accesses are performed using the KERNEL privilege level.
 * Synchronous exceptions detected on memory accesses cause a silent exit
 * from page table walking, resulting in a TLB or XTLB Refill exception.
 *
 * Implementations are not required to support page table walk memory
 * accesses from mapped memory regions. When an unsupported access is
 * attempted, a silent exit is taken, resulting in a TLB or XTLB Refill
 * exception.
 *
 * Note that if an exception is raised by the AddressTranslation or
 * LoadMemory functions, it is not taken; a silent exit occurs instead,
 * resulting in a TLB or XTLB Refill exception.
 */

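/*
 * Fetch one PTE for the walker. entry_size is in bits; the access must be
 * naturally aligned to the entry's byte size, or the walk is abandoned.
 */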
static bool get_pte(CPUMIPSState *env, uint64_t vaddr, int entry_size,
        uint64_t *pte)
{
    if ((vaddr & ((entry_size >> 3) - 1)) != 0) {
        return false;
    }
    if (entry_size == 64) {
        *pte = cpu_ldq_code(env, vaddr);
    } else {
        *pte = cpu_ldl_code(env, vaddr);
    }
    return true;
}

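/*
 * Convert a PTE as stored in memory to EntryLo layout: the RI/XI bit pair
 * sits just below the PFN field indexed by PTEI and is relocated to the
 * architected CP0EnLo_XI/CP0EnLo_RI positions.
 */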
static uint64_t get_tlb_entry_layout(CPUMIPSState *env, uint64_t entry,
        int entry_size, int ptei)
{
    uint64_t result = entry;
    uint64_t rixi;
    if (ptei > entry_size) {
        ptei -= 32;
    }
    result >>= (ptei - 2);
    rixi = result & 3;
    result >>= 2;
    result |= rixi << CP0EnLo_XI;
    return result;
}

static int walk_directory(CPUMIPSState *env, uint64_t *vaddr,
        int directory_index, bool *huge_page, bool *hgpg_directory_hit,
        uint64_t *pw_entrylo0, uint64_t *pw_entrylo1,
        unsigned directory_shift, unsigned leaf_shift)
{
    int dph = (env->CP0_PWCtl >> CP0PC_DPH) & 0x1;
    int psn = (env->CP0_PWCtl >> CP0PC_PSN) & 0x3F;
    int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;
    int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
    uint32_t direntry_size = 1 << (directory_shift + 3);
    uint32_t leafentry_size = 1 << (leaf_shift + 3);
    uint64_t entry;
    uint64_t paddr;
    int prot;
    uint64_t lsb = 0;
    uint64_t w = 0;

    if (get_physical_address(env, &paddr, &prot, *vaddr, MMU_DATA_LOAD,
                             cpu_mmu_index(env, false)) !=
                             TLBRET_MATCH) {
        /* wrong base address */
        return 0;
    }
    if (!get_pte(env, *vaddr, direntry_size, &entry)) {
        return 0;
    }

    if ((entry & (1 << psn)) && hugepg) {
        *huge_page = true;
        *hgpg_directory_hit = true;
        entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew);
        w = directory_index - 1;
        if (directory_index & 0x1) {
            /* Generate adjacent page from same PTE for odd TLB page */
            lsb = BIT_ULL(w) >> 6;
            *pw_entrylo0 = entry & ~lsb; /* even page */
            *pw_entrylo1 = entry | lsb; /* odd page */
        } else if (dph) {
            int oddpagebit = 1 << leaf_shift;
            uint64_t vaddr2 = *vaddr ^ oddpagebit;
            if (*vaddr & oddpagebit) {
                *pw_entrylo1 = entry;
            } else {
                *pw_entrylo0 = entry;
            }
            if (get_physical_address(env, &paddr, &prot, vaddr2, MMU_DATA_LOAD,
                                     cpu_mmu_index(env, false)) !=
                                     TLBRET_MATCH) {
                return 0;
            }
            if (!get_pte(env, vaddr2, leafentry_size, &entry)) {
                return 0;
            }
            entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew);
            if (*vaddr & oddpagebit) {
                *pw_entrylo0 = entry;
            } else {
                *pw_entrylo1 = entry;
            }
        } else {
            return 0;
        }
        return 1;
    } else {
        *vaddr = entry;
        return 2;
    }
}

static bool page_table_walk_refill(CPUMIPSState *env, vaddr address,
                                   int mmu_idx)
{
    int gdw = (env->CP0_PWSize >> CP0PS_GDW) & 0x3F;
    int udw = (env->CP0_PWSize >> CP0PS_UDW) & 0x3F;
    int mdw = (env->CP0_PWSize >> CP0PS_MDW) & 0x3F;
    int ptw = (env->CP0_PWSize >> CP0PS_PTW) & 0x3F;
    int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F;

    /* Initial values */
    bool huge_page = false;
    bool hgpg_bdhit = false;
    bool hgpg_gdhit = false;
    bool hgpg_udhit = false;
    bool hgpg_mdhit = false;

    int32_t pw_pagemask = 0;
    target_ulong pw_entryhi = 0;
    uint64_t pw_entrylo0 = 0;
    uint64_t pw_entrylo1 = 0;

    /* Native pointer size */
    /* For the 32-bit architectures, this bit is fixed to 0. */
    int native_shift = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? 2 : 3;

    /* Indices from PWField */
    int pf_gdw = (env->CP0_PWField >> CP0PF_GDW) & 0x3F;
    int pf_udw = (env->CP0_PWField >> CP0PF_UDW) & 0x3F;
    int pf_mdw = (env->CP0_PWField >> CP0PF_MDW) & 0x3F;
    int pf_ptw = (env->CP0_PWField >> CP0PF_PTW) & 0x3F;
    int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;

    /* Indices computed from faulting address */
    int gindex = (address >> pf_gdw) & ((1 << gdw) - 1);
    int uindex = (address >> pf_udw) & ((1 << udw) - 1);
    int mindex = (address >> pf_mdw) & ((1 << mdw) - 1);
    int ptindex = (address >> pf_ptw) & ((1 << ptw) - 1);

    /* Other HTW configs */
    int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;
    unsigned directory_shift, leaf_shift;

    /* Offsets into tables */
    unsigned goffset, uoffset, moffset, ptoffset0, ptoffset1;
    uint32_t leafentry_size;

    /* Starting address - Page Table Base */
    uint64_t vaddr = env->CP0_PWBase;

    uint64_t dir_entry;
    uint64_t paddr;
    int prot;
    int m;

    if (!(env->CP0_Config3 & (1 << CP0C3_PW))) {
        /* walker is unimplemented */
        return false;
    }
    if (!(env->CP0_PWCtl & (1 << CP0PC_PWEN))) {
        /* walker is disabled */
        return false;
    }
    if (!(gdw > 0 || udw > 0 || mdw > 0)) {
        /* no structure to walk */
        return false;
    }
    if (ptew > 1) {
        return false;
    }

    /* HTW Shift values (depend on entry size) */
    directory_shift = (hugepg && (ptew == 1)) ? native_shift + 1 : native_shift;
    leaf_shift = (ptew == 1) ? native_shift + 1 : native_shift;

    goffset = gindex << directory_shift;
    uoffset = uindex << directory_shift;
    moffset = mindex << directory_shift;
    ptoffset0 = (ptindex >> 1) << (leaf_shift + 1);
    ptoffset1 = ptoffset0 | (1 << (leaf_shift));

    leafentry_size = 1 << (leaf_shift + 3);

    /* Global Directory */
    if (gdw > 0) {
        vaddr |= goffset;
        switch (walk_directory(env, &vaddr, pf_gdw, &huge_page, &hgpg_gdhit,
                               &pw_entrylo0, &pw_entrylo1,
                               directory_shift, leaf_shift)) {
        case 0:
            return false;
        case 1:
            goto refill;
        case 2:
        default:
            break;
        }
    }

    /* Upper directory */
    if (udw > 0) {
        vaddr |= uoffset;
        switch (walk_directory(env, &vaddr, pf_udw, &huge_page, &hgpg_udhit,
                               &pw_entrylo0, &pw_entrylo1,
                               directory_shift, leaf_shift)) {
        case 0:
            return false;
        case 1:
            goto refill;
        case 2:
        default:
            break;
        }
    }

    /* Middle directory */
    if (mdw > 0) {
        vaddr |= moffset;
        switch (walk_directory(env, &vaddr, pf_mdw, &huge_page, &hgpg_mdhit,
                               &pw_entrylo0, &pw_entrylo1,
                               directory_shift, leaf_shift)) {
        case 0:
            return false;
        case 1:
            goto refill;
        case 2:
        default:
            break;
        }
    }

    /* Leaf Level Page Table - First half of PTE pair */
    vaddr |= ptoffset0;
    if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
                             cpu_mmu_index(env, false)) !=
                             TLBRET_MATCH) {
        return false;
    }
    if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) {
        return false;
    }
    dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew);
    pw_entrylo0 = dir_entry;

    /* Leaf Level Page Table - Second half of PTE pair */
    vaddr |= ptoffset1;
    if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
                             cpu_mmu_index(env, false)) !=
                             TLBRET_MATCH) {
        return false;
    }
    if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) {
        return false;
    }
    dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew);
    pw_entrylo1 = dir_entry;

refill:

    m = (1 << pf_ptw) - 1;

    if (huge_page) {
        switch (hgpg_bdhit << 3 | hgpg_gdhit << 2 | hgpg_udhit << 1 |
                hgpg_mdhit) {
        case 4:
            m = (1 << pf_gdw) - 1;
            if (pf_gdw & 1) {
                m >>= 1;
            }
            break;
        case 2:
            m = (1 << pf_udw) - 1;
            if (pf_udw & 1) {
                m >>= 1;
            }
            break;
        case 1:
            m = (1 << pf_mdw) - 1;
            if (pf_mdw & 1) {
                m >>= 1;
            }
            break;
        }
    }
    pw_pagemask = m >> TARGET_PAGE_BITS_MIN;
    update_pagemask(env, pw_pagemask << CP0PM_MASK, &pw_pagemask);
    pw_entryhi = (address & ~0x1fff) | (env->CP0_EntryHi & 0xFF);
    {
        target_ulong tmp_entryhi = env->CP0_EntryHi;
        int32_t tmp_pagemask = env->CP0_PageMask;
        uint64_t tmp_entrylo0 = env->CP0_EntryLo0;
        uint64_t tmp_entrylo1 = env->CP0_EntryLo1;

        env->CP0_EntryHi = pw_entryhi;
        env->CP0_PageMask = pw_pagemask;
        env->CP0_EntryLo0 = pw_entrylo0;
        env->CP0_EntryLo1 = pw_entrylo1;

        /*
         * The hardware page walker inserts a page into the TLB in a manner
         * identical to a TLBWR instruction as executed by the software refill
         * handler.
         */
        r4k_helper_tlbwr(env);

        env->CP0_EntryHi = tmp_entryhi;
        env->CP0_PageMask = tmp_pagemask;
        env->CP0_EntryLo0 = tmp_entrylo0;
        env->CP0_EntryLo1 = tmp_entrylo1;
    }
    return true;
}
#endif

bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    hwaddr physical;
    int prot;
    int ret = TLBRET_BADADDR;

    /* data access */
    /* XXX: determine the correct access type using cpu_restore_state() */
    ret = get_physical_address(env, &physical, &prot, address,
                               access_type, mmu_idx);
    switch (ret) {
    case TLBRET_MATCH:
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " physical " HWADDR_FMT_plx
                      " prot %d\n", __func__, address, physical, prot);
        break;
    default:
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
                      ret);
        break;
    }
    if (ret == TLBRET_MATCH) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                     physical & TARGET_PAGE_MASK, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
#if !defined(TARGET_MIPS64)
    if ((ret == TLBRET_NOMATCH) && (env->tlb->nb_tlb > 1)) {
        /*
         * Memory reads during hardware page table walking are performed
         * as if they were kernel-mode load instructions.
         */
        int mode = (env->hflags & MIPS_HFLAG_KSU);
        bool ret_walker;
        env->hflags &= ~MIPS_HFLAG_KSU;
        ret_walker = page_table_walk_refill(env, address, mmu_idx);
        env->hflags |= mode;
        if (ret_walker) {
            ret = get_physical_address(env, &physical, &prot, address,
                                       access_type, mmu_idx);
            if (ret == TLBRET_MATCH) {
                tlb_set_page(cs, address & TARGET_PAGE_MASK,
                             physical & TARGET_PAGE_MASK, prot,
                             mmu_idx, TARGET_PAGE_SIZE);
                return true;
            }
        }
    }
#endif
    if (probe) {
        return false;
    }

    raise_mmu_exception(env, address, access_type, ret);
    do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr);
}

hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address,
                                  MMUAccessType access_type, uintptr_t retaddr)
{
    hwaddr physical;
    int prot;
    int ret = 0;
    CPUState *cs = env_cpu(env);

    /* data access */
    ret = get_physical_address(env, &physical, &prot, address, access_type,
                               cpu_mmu_index(env, false));
    if (ret == TLBRET_MATCH) {
        return physical;
    }

    raise_mmu_exception(env, address, access_type, ret);
    cpu_loop_exit_restore(cs, retaddr);
}

static void set_hflags_for_handler(CPUMIPSState *env)
{
    /* Exception handlers are entered in 32-bit mode.  */
    env->hflags &= ~(MIPS_HFLAG_M16);
    /* ...except that microMIPS lets you choose.  */
    if (env->insn_flags & ASE_MICROMIPS) {
        env->hflags |= (!!(env->CP0_Config3 &
                           (1 << CP0C3_ISA_ON_EXC))
                        << MIPS_HFLAG_M16_SHIFT);
    }
}

static inline void set_badinstr_registers(CPUMIPSState *env)
{
    if (env->insn_flags & ISA_NANOMIPS32) {
        if (env->CP0_Config3 & (1 << CP0C3_BI)) {
            uint32_t instr = (cpu_lduw_code(env, env->active_tc.PC)) << 16;
            if ((instr & 0x10000000) == 0) {
                instr |= cpu_lduw_code(env, env->active_tc.PC + 2);
            }
            env->CP0_BadInstr = instr;

            if ((instr & 0xFC000000) == 0x60000000) {
                instr = cpu_lduw_code(env, env->active_tc.PC + 4) << 16;
                env->CP0_BadInstrX = instr;
            }
        }
        return;
    }

    if (env->hflags & MIPS_HFLAG_M16) {
        /* TODO: add BadInstr support for microMIPS */
        return;
    }
    if (env->CP0_Config3 & (1 << CP0C3_BI)) {
        env->CP0_BadInstr = cpu_ldl_code(env, env->active_tc.PC);
    }
    if ((env->CP0_Config3 & (1 << CP0C3_BP)) &&
        (env->hflags & MIPS_HFLAG_BMASK)) {
        env->CP0_BadInstrP = cpu_ldl_code(env, env->active_tc.PC - 4);
    }
}

void mips_cpu_do_interrupt(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    bool update_badinstr = false;
    target_ulong offset;
    int cause = -1;

    if (qemu_loglevel_mask(CPU_LOG_INT)
        && cs->exception_index != EXCP_EXT_INTERRUPT) {
        qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx
                 " %s exception\n",
                 __func__, env->active_tc.PC, env->CP0_EPC,
                 mips_exception_name(cs->exception_index));
    }
    if (cs->exception_index == EXCP_EXT_INTERRUPT &&
        (env->hflags & MIPS_HFLAG_DM)) {
        cs->exception_index = EXCP_DINT;
    }
    offset = 0x180;
    switch (cs->exception_index) {
    case EXCP_SEMIHOST:
        cs->exception_index = EXCP_NONE;
        mips_semihosting(env);
        env->active_tc.PC += env->error_code;
        return;
    case EXCP_DSS:
        env->CP0_Debug |= 1 << CP0DB_DSS;
        /*
         * Debug single step cannot be raised inside a delay slot and
         * resume will always occur on the next instruction
         * (but we assume the pc has always been updated during
         * code translation).
         */
        env->CP0_DEPC = env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16);
        goto enter_debug_mode;
    case EXCP_DINT:
        env->CP0_Debug |= 1 << CP0DB_DINT;
        goto set_DEPC;
    case EXCP_DIB:
        env->CP0_Debug |= 1 << CP0DB_DIB;
        goto set_DEPC;
    case EXCP_DBp:
        env->CP0_Debug |= 1 << CP0DB_DBp;
        /* Setup DExcCode - SDBBP instruction */
        env->CP0_Debug = (env->CP0_Debug & ~(0x1fULL << CP0DB_DEC)) |
                         (9 << CP0DB_DEC);
        goto set_DEPC;
    case EXCP_DDBS:
        env->CP0_Debug |= 1 << CP0DB_DDBS;
        goto set_DEPC;
    case EXCP_DDBL:
        env->CP0_Debug |= 1 << CP0DB_DDBL;
    set_DEPC:
        env->CP0_DEPC = exception_resume_pc(env);
        env->hflags &= ~MIPS_HFLAG_BMASK;
 enter_debug_mode:
        if (env->insn_flags & ISA_MIPS3) {
            env->hflags |= MIPS_HFLAG_64;
            if (!(env->insn_flags & ISA_MIPS_R6) ||
                env->CP0_Status & (1 << CP0St_KX)) {
                env->hflags &= ~MIPS_HFLAG_AWRAP;
            }
        }
        env->hflags |= MIPS_HFLAG_DM | MIPS_HFLAG_CP0;
        env->hflags &= ~(MIPS_HFLAG_KSU);
        /* EJTAG probe trap enable is not implemented... */
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            env->CP0_Cause &= ~(1U << CP0Ca_BD);
        }
        env->active_tc.PC = env->exception_base + 0x480;
        set_hflags_for_handler(env);
        break;
    case EXCP_RESET:
        cpu_reset(CPU(cpu));
        break;
    case EXCP_SRESET:
        env->CP0_Status |= (1 << CP0St_SR);
        memset(env->CP0_WatchLo, 0, sizeof(env->CP0_WatchLo));
        goto set_error_EPC;
    case EXCP_NMI:
        env->CP0_Status |= (1 << CP0St_NMI);
 set_error_EPC:
        env->CP0_ErrorEPC = exception_resume_pc(env);
        env->hflags &= ~MIPS_HFLAG_BMASK;
        env->CP0_Status |= (1 << CP0St_ERL) | (1 << CP0St_BEV);
        if (env->insn_flags & ISA_MIPS3) {
            env->hflags |= MIPS_HFLAG_64;
            if (!(env->insn_flags & ISA_MIPS_R6) ||
                env->CP0_Status & (1 << CP0St_KX)) {
                env->hflags &= ~MIPS_HFLAG_AWRAP;
            }
        }
        env->hflags |= MIPS_HFLAG_CP0;
        env->hflags &= ~(MIPS_HFLAG_KSU);
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            env->CP0_Cause &= ~(1U << CP0Ca_BD);
        }
        env->active_tc.PC = env->exception_base;
        set_hflags_for_handler(env);
        break;
    case EXCP_EXT_INTERRUPT:
        cause = 0;
        if (env->CP0_Cause & (1 << CP0Ca_IV)) {
            uint32_t spacing = (env->CP0_IntCtl >> CP0IntCtl_VS) & 0x1f;

            if ((env->CP0_Status & (1 << CP0St_BEV)) || spacing == 0) {
                offset = 0x200;
            } else {
                uint32_t vector = 0;
                uint32_t pending = (env->CP0_Cause & CP0Ca_IP_mask) >> CP0Ca_IP;

                if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
                    /*
                     * For VEIC mode, the external interrupt controller feeds
                     * the vector through the CP0Cause IP lines.
                     */
                    vector = pending;
                } else {
                    /*
                     * Vectored Interrupts
                     * Mask with Status.IM7-IM0 to get enabled interrupts.
                     */
                    pending &= (env->CP0_Status >> CP0St_IM) & 0xff;
                    /* Find the highest-priority interrupt. */
                    while (pending >>= 1) {
                        vector++;
                    }
                }
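                /*
                 * IntCtl.VS encodes the vector spacing in 32-byte units,
                 * so vector n lands n * (VS << 5) bytes past offset 0x200.
                 */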
                offset = 0x200 + (vector * (spacing << 5));
            }
        }
        goto set_EPC;
    case EXCP_LTLBL:
        cause = 1;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        goto set_EPC;
    case EXCP_TLBL:
        cause = 2;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        if ((env->error_code & EXCP_TLB_NOMATCH) &&
            !(env->CP0_Status & (1 << CP0St_EXL))) {
#if defined(TARGET_MIPS64)
            int R = env->CP0_BadVAddr >> 62;
            int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
            int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;

            if ((R != 0 || UX) && (R != 3 || KX) &&
                (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
                offset = 0x080;
            } else {
#endif
                offset = 0x000;
#if defined(TARGET_MIPS64)
            }
#endif
        }
        goto set_EPC;
    case EXCP_TLBS:
        cause = 3;
        update_badinstr = 1;
        if ((env->error_code & EXCP_TLB_NOMATCH) &&
            !(env->CP0_Status & (1 << CP0St_EXL))) {
#if defined(TARGET_MIPS64)
            int R = env->CP0_BadVAddr >> 62;
            int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
            int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;

            if ((R != 0 || UX) && (R != 3 || KX) &&
                (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
                offset = 0x080;
            } else {
#endif
                offset = 0x000;
#if defined(TARGET_MIPS64)
            }
#endif
        }
        goto set_EPC;
    case EXCP_AdEL:
        cause = 4;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        goto set_EPC;
    case EXCP_AdES:
        cause = 5;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_IBE:
        cause = 6;
        goto set_EPC;
    case EXCP_DBE:
        cause = 7;
        goto set_EPC;
    case EXCP_SYSCALL:
        cause = 8;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_BREAK:
        cause = 9;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_RI:
        cause = 10;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_CpU:
        cause = 11;
        update_badinstr = 1;
        env->CP0_Cause = (env->CP0_Cause & ~(0x3 << CP0Ca_CE)) |
                         (env->error_code << CP0Ca_CE);
        goto set_EPC;
    case EXCP_OVERFLOW:
        cause = 12;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_TRAP:
        cause = 13;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_MSAFPE:
        cause = 14;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_FPE:
        cause = 15;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_C2E:
        cause = 18;
        goto set_EPC;
    case EXCP_TLBRI:
        cause = 19;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_TLBXI:
        cause = 20;
        goto set_EPC;
    case EXCP_MSADIS:
        cause = 21;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_MDMX:
        cause = 22;
        goto set_EPC;
    case EXCP_DWATCH:
        cause = 23;
        /* XXX: TODO: manage deferred watch exceptions */
        goto set_EPC;
    case EXCP_MCHECK:
        cause = 24;
        goto set_EPC;
    case EXCP_THREAD:
        cause = 25;
        goto set_EPC;
    case EXCP_DSPDIS:
        cause = 26;
        goto set_EPC;
    case EXCP_CACHE:
        cause = 30;
        offset = 0x100;
 set_EPC:
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            env->CP0_EPC = exception_resume_pc(env);
            if (update_badinstr) {
                set_badinstr_registers(env);
            }
            if (env->hflags & MIPS_HFLAG_BMASK) {
                env->CP0_Cause |= (1U << CP0Ca_BD);
            } else {
                env->CP0_Cause &= ~(1U << CP0Ca_BD);
            }
            env->CP0_Status |= (1 << CP0St_EXL);
            if (env->insn_flags & ISA_MIPS3) {
                env->hflags |= MIPS_HFLAG_64;
                if (!(env->insn_flags & ISA_MIPS_R6) ||
                    env->CP0_Status & (1 << CP0St_KX)) {
                    env->hflags &= ~MIPS_HFLAG_AWRAP;
                }
            }
            env->hflags |= MIPS_HFLAG_CP0;
            env->hflags &= ~(MIPS_HFLAG_KSU);
        }
        env->hflags &= ~MIPS_HFLAG_BMASK;
        if (env->CP0_Status & (1 << CP0St_BEV)) {
            env->active_tc.PC = env->exception_base + 0x200;
        } else if (cause == 30 && !(env->CP0_Config3 & (1 << CP0C3_SC) &&
                                    env->CP0_Config5 & (1 << CP0C5_CV))) {
            /* Force KSeg1 for cache errors */
            env->active_tc.PC = KSEG1_BASE | (env->CP0_EBase & 0x1FFFF000);
        } else {
            env->active_tc.PC = env->CP0_EBase & ~0xfff;
        }

        env->active_tc.PC += offset;
        set_hflags_for_handler(env);
        env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) |
                         (cause << CP0Ca_EC);
        break;
    default:
        abort();
    }
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && cs->exception_index != EXCP_EXT_INTERRUPT) {
        qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n"
                 "    S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n",
                 __func__, env->active_tc.PC, env->CP0_EPC, cause,
                 env->CP0_Status, env->CP0_Cause, env->CP0_BadVAddr,
                 env->CP0_DEPC);
    }
    cs->exception_index = EXCP_NONE;
}

bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        MIPSCPU *cpu = MIPS_CPU(cs);
        CPUMIPSState *env = &cpu->env;

        if (cpu_mips_hw_interrupts_enabled(env) &&
            cpu_mips_hw_interrupts_pending(env)) {
            /* Raise it */
            cs->exception_index = EXCP_EXT_INTERRUPT;
            env->error_code = 0;
            mips_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra)
{
    CPUState *cs = env_cpu(env);
    r4k_tlb_t *tlb;
    target_ulong addr;
    target_ulong end;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint32_t tlb_mmid;
    target_ulong mask;

    MMID = mi ? MMID : (uint32_t) ASID;

    tlb = &env->tlb->mmu.r4k.tlb[idx];
    /*
     * The qemu TLB is flushed when the ASID/MMID changes, so no need to
     * flush these entries again.
     */
    tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
    if (tlb->G == 0 && tlb_mmid != MMID) {
        return;
    }

    if (use_extra && env->tlb->tlb_in_use < MIPS_TLB_MAX) {
        /*
         * For tlbwr, we can shadow the discarded entry into
         * a new (fake) TLB entry, as long as the guest can not
         * tell that it's there.
         */
        env->tlb->mmu.r4k.tlb[env->tlb->tlb_in_use] = *tlb;
        env->tlb->tlb_in_use++;
        return;
    }

    /* 1k pages are not supported. */
    mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
    if (tlb->V0) {
        addr = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | (mask >> 1);
        while (addr < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
    if (tlb->V1) {
        addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
#if defined(TARGET_MIPS64)
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | mask;
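        /*
         * "addr - 1 < end" keeps the loop inclusive of the final page yet
         * still terminates if addr wraps past the top of the address space.
         */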
        while (addr - 1 < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
}