qemu/target/mips/helper.c
/*
 *  MIPS emulation helpers for qemu.
 *
 *  Copyright (c) 2004-2005 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "hw/mips/cpudevs.h"

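/*
 * Return codes shared by the map_address() implementations below: negative
 * values are translation faults, TLBRET_MATCH means the lookup succeeded.
 */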
enum {
    TLBRET_XI = -6,
    TLBRET_RI = -5,
    TLBRET_DIRTY = -4,
    TLBRET_INVALID = -3,
    TLBRET_NOMATCH = -2,
    TLBRET_BADADDR = -1,
    TLBRET_MATCH = 0
};

#if !defined(CONFIG_USER_ONLY)

/* no MMU emulation */
int no_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
                        target_ulong address, int rw, int access_type)
{
    *physical = address;
    *prot = PAGE_READ | PAGE_WRITE;
    return TLBRET_MATCH;
}

/* fixed mapping MMU emulation */
int fixed_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
                           target_ulong address, int rw, int access_type)
{
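    /* useg is offset by 0x40000000 unless Status.ERL is set (then identity),
       the kseg0/kseg1 range is stripped down to the low 512MB of physical
       address space, and everything else is mapped flat. */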
    if (address <= (int32_t)0x7FFFFFFFUL) {
        if (!(env->CP0_Status & (1 << CP0St_ERL)))
            *physical = address + 0x40000000UL;
        else
            *physical = address;
    } else if (address <= (int32_t)0xBFFFFFFFUL)
        *physical = address & 0x1FFFFFFF;
    else
        *physical = address;

    *prot = PAGE_READ | PAGE_WRITE;
    return TLBRET_MATCH;
}

/* MIPS32/MIPS64 R4000-style MMU emulation */
int r4k_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
                     target_ulong address, int rw, int access_type)
{
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    int i;

    for (i = 0; i < env->tlb->tlb_in_use; i++) {
        r4k_tlb_t *tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        target_ulong mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        target_ulong tag = address & ~mask;
        target_ulong VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        tag &= env->SEGMask;
#endif

        /* Check ASID, virtual page number & size */
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag && !tlb->EHINV) {
            /* TLB match */
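            /* Each entry maps an even/odd page pair; the address bit just
               below the pair mask picks the odd (n = 1) or even (n = 0)
               page. */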
            int n = !!(address & mask & ~(mask >> 1));
            /* Check access rights */
            if (!(n ? tlb->V1 : tlb->V0)) {
                return TLBRET_INVALID;
            }
            if (rw == MMU_INST_FETCH && (n ? tlb->XI1 : tlb->XI0)) {
                return TLBRET_XI;
            }
            if (rw == MMU_DATA_LOAD && (n ? tlb->RI1 : tlb->RI0)) {
                return TLBRET_RI;
            }
            if (rw != MMU_DATA_STORE || (n ? tlb->D1 : tlb->D0)) {
                *physical = tlb->PFN[n] | (address & (mask >> 1));
                *prot = PAGE_READ;
                if (n ? tlb->D1 : tlb->D0)
                    *prot |= PAGE_WRITE;
                return TLBRET_MATCH;
            }
            return TLBRET_DIRTY;
        }
    }
    return TLBRET_NOMATCH;
}

static int is_seg_am_mapped(unsigned int am, bool eu, int mmu_idx)
{
    /*
     * Interpret access control mode and mmu_idx.
     *           AdE?     TLB?
     *      AM  K S U E  K S U E
     * UK    0  0 1 1 0  0 - - 0
     * MK    1  0 1 1 0  1 - - !eu
     * MSK   2  0 0 1 0  1 1 - !eu
     * MUSK  3  0 0 0 0  1 1 1 !eu
     * MUSUK 4  0 0 0 0  0 1 1 0
     * USK   5  0 0 1 0  0 0 - 0
     * -     6  - - - -  - - - -
     * UUSK  7  0 0 0 0  0 0 0 0
     */
    int32_t adetlb_mask;

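    /* The adetlb_mask constants hold one bit per AM value, MSB-first (bit 31
       is AM 0).  At check_ade the top byte flags the AMs that raise an
       address error; the <<= 8 then moves the TLB-mapped byte to the top so
       that check_tlb can test it with the same (adetlb_mask << am) < 0. */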
    switch (mmu_idx) {
    case 3 /* ERL */:
        /* If EU is set, always unmapped */
        if (eu) {
            return 0;
        }
        /* fall through */
    case MIPS_HFLAG_KM:
        /* Never AdE, TLB mapped if AM={1,2,3} */
        adetlb_mask = 0x70000000;
        goto check_tlb;

    case MIPS_HFLAG_SM:
        /* AdE if AM={0,1}, TLB mapped if AM={2,3,4} */
        adetlb_mask = 0xc0380000;
        goto check_ade;

    case MIPS_HFLAG_UM:
        /* AdE if AM={0,1,2,5}, TLB mapped if AM={3,4} */
        adetlb_mask = 0xe4180000;
        /* fall through */
    check_ade:
        /* does this AM cause AdE in current execution mode */
        if ((adetlb_mask << am) < 0) {
            return TLBRET_BADADDR;
        }
        adetlb_mask <<= 8;
        /* fall through */
    check_tlb:
        /* is this AM mapped in current execution mode */
        return ((adetlb_mask << am) < 0);
    default:
        assert(0);
        return TLBRET_BADADDR;
    };
}

static int get_seg_physical_address(CPUMIPSState *env, hwaddr *physical,
                                    int *prot, target_ulong real_address,
                                    int rw, int access_type, int mmu_idx,
                                    unsigned int am, bool eu,
                                    target_ulong segmask,
                                    hwaddr physical_base)
{
    int mapped = is_seg_am_mapped(am, eu, mmu_idx);

    if (mapped < 0) {
        /* is_seg_am_mapped can report TLBRET_BADADDR */
        return mapped;
    } else if (mapped) {
        /* The segment is TLB mapped */
        return env->tlb->map_address(env, physical, prot, real_address, rw,
                                     access_type);
    } else {
        /* The segment is unmapped */
        *physical = physical_base | (real_address & segmask);
        *prot = PAGE_READ | PAGE_WRITE;
        return TLBRET_MATCH;
    }
}

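/* Decode one 16-bit segment configuration (access mode, EU bit, physical
 * base) from a CP0 SegCtl register half and translate through it. */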
static int get_segctl_physical_address(CPUMIPSState *env, hwaddr *physical,
                                       int *prot, target_ulong real_address,
                                       int rw, int access_type, int mmu_idx,
                                       uint16_t segctl, target_ulong segmask)
{
    unsigned int am = (segctl & CP0SC_AM_MASK) >> CP0SC_AM;
    bool eu = (segctl >> CP0SC_EU) & 1;
    hwaddr pa = ((hwaddr)segctl & CP0SC_PA_MASK) << 20;

    return get_seg_physical_address(env, physical, prot, real_address, rw,
                                    access_type, mmu_idx, am, eu, segmask,
                                    pa & ~(hwaddr)segmask);
}

static int get_physical_address (CPUMIPSState *env, hwaddr *physical,
                                 int *prot, target_ulong real_address,
                                 int rw, int access_type, int mmu_idx)
{
    /* User mode can only access useg/xuseg */
#if defined(TARGET_MIPS64)
    int user_mode = mmu_idx == MIPS_HFLAG_UM;
    int supervisor_mode = mmu_idx == MIPS_HFLAG_SM;
    int kernel_mode = !user_mode && !supervisor_mode;
    int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
    int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0;
    int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
#endif
    int ret = TLBRET_MATCH;
    /* effective address (modified for KVM T&E kernel segments) */
    target_ulong address = real_address;

#define USEG_LIMIT      ((target_ulong)(int32_t)0x7FFFFFFFUL)
#define KSEG0_BASE      ((target_ulong)(int32_t)0x80000000UL)
#define KSEG1_BASE      ((target_ulong)(int32_t)0xA0000000UL)
#define KSEG2_BASE      ((target_ulong)(int32_t)0xC0000000UL)
#define KSEG3_BASE      ((target_ulong)(int32_t)0xE0000000UL)

#define KVM_KSEG0_BASE  ((target_ulong)(int32_t)0x40000000UL)
#define KVM_KSEG2_BASE  ((target_ulong)(int32_t)0x60000000UL)

    if (mips_um_ksegs_enabled()) {
        /* KVM T&E adds guest kernel segments in useg */
        if (real_address >= KVM_KSEG0_BASE) {
            if (real_address < KVM_KSEG2_BASE) {
                /* kseg0 */
                address += KSEG0_BASE - KVM_KSEG0_BASE;
            } else if (real_address <= USEG_LIMIT) {
                /* kseg2/3 */
                address += KSEG2_BASE - KVM_KSEG2_BASE;
            }
        }
    }

    if (address <= USEG_LIMIT) {
        /* useg */
        uint16_t segctl;

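        /* The two useg halves use separate segment configurations: the low
           half of SegCtl2 covers addresses from 0x40000000 up, the high half
           covers addresses below it. */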
        if (address >= 0x40000000UL) {
            segctl = env->CP0_SegCtl2;
        } else {
            segctl = env->CP0_SegCtl2 >> 16;
        }
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx, segctl,
                                          0x3FFFFFFF);
#if defined(TARGET_MIPS64)
    } else if (address < 0x4000000000000000ULL) {
        /* xuseg */
        if (UX && address <= (0x3FFFFFFFFFFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0x8000000000000000ULL) {
        /* xsseg */
        if ((supervisor_mode || kernel_mode) &&
            SX && address <= (0x7FFFFFFFFFFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0xC000000000000000ULL) {
        /* xkphys */
        if ((address & 0x07FFFFFFFFFFFFFFULL) <= env->PAMask) {
            /* KX/SX/UX bit to check for each xkphys EVA access mode */
            static const uint8_t am_ksux[8] = {
                [CP0SC_AM_UK]    = (1u << CP0St_KX),
                [CP0SC_AM_MK]    = (1u << CP0St_KX),
                [CP0SC_AM_MSK]   = (1u << CP0St_SX),
                [CP0SC_AM_MUSK]  = (1u << CP0St_UX),
                [CP0SC_AM_MUSUK] = (1u << CP0St_UX),
                [CP0SC_AM_USK]   = (1u << CP0St_SX),
                [6]              = (1u << CP0St_KX),
                [CP0SC_AM_UUSK]  = (1u << CP0St_UX),
            };
            unsigned int am = CP0SC_AM_UK;
            unsigned int xr = (env->CP0_SegCtl2 & CP0SC2_XR_MASK) >> CP0SC2_XR;

            if (xr & (1 << ((address >> 59) & 0x7))) {
                am = (env->CP0_SegCtl1 & CP0SC1_XAM_MASK) >> CP0SC1_XAM;
            }
            /* Does CP0_Status.KX/SX/UX permit the access mode (am) */
            if (env->CP0_Status & am_ksux[am]) {
                ret = get_seg_physical_address(env, physical, prot,
                                               real_address, rw, access_type,
                                               mmu_idx, am, false, env->PAMask,
                                               0);
            } else {
                ret = TLBRET_BADADDR;
            }
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0xFFFFFFFF80000000ULL) {
        /* xkseg */
        if (kernel_mode && KX &&
            address <= (0xFFFFFFFF7FFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
#endif
    } else if (address < KSEG1_BASE) {
        /* kseg0 */
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl1 >> 16, 0x1FFFFFFF);
    } else if (address < KSEG2_BASE) {
        /* kseg1 */
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl1, 0x1FFFFFFF);
    } else if (address < KSEG3_BASE) {
        /* sseg (kseg2) */
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl0 >> 16, 0x1FFFFFFF);
    } else {
        /* kseg3 */
        /* XXX: debug segment is not emulated */
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl0, 0x1FFFFFFF);
    }
    return ret;
}

void cpu_mips_tlb_flush(CPUMIPSState *env)
{
    MIPSCPU *cpu = mips_env_get_cpu(env);

    /* Flush qemu's TLB and discard all shadowed entries.  */
    tlb_flush(CPU(cpu));
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}

/* Called for updates to CP0_Status.  */
void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc)
{
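    /* Mirror Status.CU/MX/KSU and the current EntryHi ASID into the
       selected thread context's TCStatus. */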
    int32_t tcstatus, *tcst;
    uint32_t v = cpu->CP0_Status;
    uint32_t cu, mx, asid, ksu;
    uint32_t mask = ((1 << CP0TCSt_TCU3)
                       | (1 << CP0TCSt_TCU2)
                       | (1 << CP0TCSt_TCU1)
                       | (1 << CP0TCSt_TCU0)
                       | (1 << CP0TCSt_TMX)
                       | (3 << CP0TCSt_TKSU)
                       | (0xff << CP0TCSt_TASID));

    cu = (v >> CP0St_CU0) & 0xf;
    mx = (v >> CP0St_MX) & 0x1;
    ksu = (v >> CP0St_KSU) & 0x3;
    asid = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;

    tcstatus = cu << CP0TCSt_TCU0;
    tcstatus |= mx << CP0TCSt_TMX;
    tcstatus |= ksu << CP0TCSt_TKSU;
    tcstatus |= asid;

    if (tc == cpu->current_tc) {
        tcst = &cpu->active_tc.CP0_TCStatus;
    } else {
        tcst = &cpu->tcs[tc].CP0_TCStatus;
    }

    *tcst &= ~mask;
    *tcst |= tcstatus;
    compute_hflags(cpu);
}

void cpu_mips_store_status(CPUMIPSState *env, target_ulong val)
{
    uint32_t mask = env->CP0_Status_rw_bitmask;
    target_ulong old = env->CP0_Status;

    if (env->insn_flags & ISA_MIPS32R6) {
        bool has_supervisor = extract32(mask, CP0St_KSU, 2) == 0x3;
#if defined(TARGET_MIPS64)
        uint32_t ksux = (1 << CP0St_KX) & val;
        ksux |= (ksux >> 1) & val; /* KX = 0 forces SX to be 0 */
        ksux |= (ksux >> 1) & val; /* SX = 0 forces UX to be 0 */
        val = (val & ~(7 << CP0St_UX)) | ksux;
#endif
        if (has_supervisor && extract32(val, CP0St_KSU, 2) == 0x3) {
            mask &= ~(3 << CP0St_KSU);
        }
        mask &= ~(((1 << CP0St_SR) | (1 << CP0St_NMI)) & val);
    }

    env->CP0_Status = (old & ~mask) | (val & mask);
#if defined(TARGET_MIPS64)
    if ((env->CP0_Status ^ old) & (old & (7 << CP0St_UX))) {
        /* Access to at least one of the 64-bit segments has been disabled */
        tlb_flush(CPU(mips_env_get_cpu(env)));
    }
#endif
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        sync_c0_status(env, env, env->current_tc);
    } else {
        compute_hflags(env);
    }
}

void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val)
{
    uint32_t mask = 0x00C00300;
    uint32_t old = env->CP0_Cause;
    int i;

    if (env->insn_flags & ISA_MIPS32R2) {
        mask |= 1 << CP0Ca_DC;
    }
    if (env->insn_flags & ISA_MIPS32R6) {
        mask &= ~((1 << CP0Ca_WP) & val);
    }

    env->CP0_Cause = (env->CP0_Cause & ~mask) | (val & mask);

    if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
        if (env->CP0_Cause & (1 << CP0Ca_DC)) {
            cpu_mips_stop_count(env);
        } else {
            cpu_mips_start_count(env);
        }
    }

    /* Set/reset software interrupts */
    for (i = 0 ; i < 2 ; i++) {
        if ((old ^ env->CP0_Cause) & (1 << (CP0Ca_IP + i))) {
            cpu_mips_soft_irq(env, i, env->CP0_Cause & (1 << (CP0Ca_IP + i)));
        }
    }
}
#endif

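/* Convert a TLBRET_* fault code into architectural exception state: record
 * the faulting address in BadVAddr, Context/XContext and EntryHi, and select
 * the exception to deliver. */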
static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
                                int rw, int tlb_error)
{
    CPUState *cs = CPU(mips_env_get_cpu(env));
    int exception = 0, error_code = 0;

    if (rw == MMU_INST_FETCH) {
        error_code |= EXCP_INST_NOTAVAIL;
    }

    switch (tlb_error) {
    default:
    case TLBRET_BADADDR:
        /* Reference to kernel address from user mode or supervisor mode */
        /* Reference to supervisor address from user mode */
        if (rw == MMU_DATA_STORE) {
            exception = EXCP_AdES;
        } else {
            exception = EXCP_AdEL;
        }
        break;
    case TLBRET_NOMATCH:
        /* No TLB match for a mapped address */
        if (rw == MMU_DATA_STORE) {
            exception = EXCP_TLBS;
        } else {
            exception = EXCP_TLBL;
        }
        error_code |= EXCP_TLB_NOMATCH;
        break;
    case TLBRET_INVALID:
        /* TLB match with no valid bit */
        if (rw == MMU_DATA_STORE) {
            exception = EXCP_TLBS;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    case TLBRET_DIRTY:
        /* TLB match but 'D' bit is cleared */
        exception = EXCP_LTLBL;
        break;
    case TLBRET_XI:
        /* Execute-Inhibit Exception */
        if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
            exception = EXCP_TLBXI;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    case TLBRET_RI:
        /* Read-Inhibit Exception */
        if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
            exception = EXCP_TLBRI;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    }
    /* Raise exception */
    env->CP0_BadVAddr = address;
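    /* Context.BadVPN2 = VA[31:13], stored at bits 22:4 of Context */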
    env->CP0_Context = (env->CP0_Context & ~0x007fffff) |
                       ((address >> 9) & 0x007ffff0);
    env->CP0_EntryHi = (env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask) |
                       (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) |
                       (address & (TARGET_PAGE_MASK << 1));
#if defined(TARGET_MIPS64)
    env->CP0_EntryHi &= env->SEGMask;
    env->CP0_XContext =
        /* PTEBase */   (env->CP0_XContext & ((~0ULL) << (env->SEGBITS - 7))) |
        /* R */         (extract64(address, 62, 2) << (env->SEGBITS - 9)) |
        /* BadVPN2 */   (extract64(address, 13, env->SEGBITS - 13) << 4);
#endif
    cs->exception_index = exception;
    env->error_code = error_code;
}

#if !defined(CONFIG_USER_ONLY)
hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    hwaddr phys_addr;
    int prot;

    if (get_physical_address(env, &phys_addr, &prot, addr, 0, ACCESS_INT,
                             cpu_mmu_index(env, false)) != 0) {
        return -1;
    }
    return phys_addr;
}
#endif

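/* Return 0 when the translation succeeds and the page has been entered into
 * QEMU's TLB, or 1 when an MMU exception has been raised for the guest. */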
int mips_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
                              int mmu_idx)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
#if !defined(CONFIG_USER_ONLY)
    hwaddr physical;
    int prot;
    int access_type;
#endif
    int ret = 0;

#if 0
    log_cpu_state(cs, 0);
#endif
    qemu_log_mask(CPU_LOG_MMU,
              "%s pc " TARGET_FMT_lx " ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
              __func__, env->active_tc.PC, address, rw, mmu_idx);

    /* data access */
#if !defined(CONFIG_USER_ONLY)
    /* XXX: put correct access by using cpu_restore_state()
       correctly */
    access_type = ACCESS_INT;
    ret = get_physical_address(env, &physical, &prot,
                               address, rw, access_type, mmu_idx);
    switch (ret) {
    case TLBRET_MATCH:
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " physical " TARGET_FMT_plx
                      " prot %d\n", __func__, address, physical, prot);
        break;
    default:
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
                      ret);
        break;
    }
    if (ret == TLBRET_MATCH) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                     physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
                     mmu_idx, TARGET_PAGE_SIZE);
        ret = 0;
    } else if (ret < 0)
#endif
    {
        raise_mmu_exception(env, address, rw, ret);
        ret = 1;
    }

    return ret;
}

#if !defined(CONFIG_USER_ONLY)
hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address, int rw)
{
    hwaddr physical;
    int prot;
    int access_type;
    int ret = 0;

    /* data access */
    access_type = ACCESS_INT;
    ret = get_physical_address(env, &physical, &prot, address, rw, access_type,
                               cpu_mmu_index(env, false));
    if (ret != TLBRET_MATCH) {
        raise_mmu_exception(env, address, rw, ret);
        return -1LL;
    } else {
        return physical;
    }
}

static const char * const excp_names[EXCP_LAST + 1] = {
    [EXCP_RESET] = "reset",
    [EXCP_SRESET] = "soft reset",
    [EXCP_DSS] = "debug single step",
    [EXCP_DINT] = "debug interrupt",
    [EXCP_NMI] = "non-maskable interrupt",
    [EXCP_MCHECK] = "machine check",
    [EXCP_EXT_INTERRUPT] = "interrupt",
    [EXCP_DFWATCH] = "deferred watchpoint",
    [EXCP_DIB] = "debug instruction breakpoint",
    [EXCP_IWATCH] = "instruction fetch watchpoint",
    [EXCP_AdEL] = "address error load",
    [EXCP_AdES] = "address error store",
    [EXCP_TLBF] = "TLB refill",
    [EXCP_IBE] = "instruction bus error",
    [EXCP_DBp] = "debug breakpoint",
    [EXCP_SYSCALL] = "syscall",
    [EXCP_BREAK] = "break",
    [EXCP_CpU] = "coprocessor unusable",
    [EXCP_RI] = "reserved instruction",
    [EXCP_OVERFLOW] = "arithmetic overflow",
    [EXCP_TRAP] = "trap",
    [EXCP_FPE] = "floating point",
    [EXCP_DDBS] = "debug data break store",
    [EXCP_DWATCH] = "data watchpoint",
    [EXCP_LTLBL] = "TLB modify",
    [EXCP_TLBL] = "TLB load",
    [EXCP_TLBS] = "TLB store",
    [EXCP_DBE] = "data bus error",
    [EXCP_DDBL] = "debug data break load",
    [EXCP_THREAD] = "thread",
    [EXCP_MDMX] = "MDMX",
    [EXCP_C2E] = "precise coprocessor 2",
    [EXCP_CACHE] = "cache error",
    [EXCP_TLBXI] = "TLB execute-inhibit",
    [EXCP_TLBRI] = "TLB read-inhibit",
    [EXCP_MSADIS] = "MSA disabled",
    [EXCP_MSAFPE] = "MSA floating point",
};
#endif

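/* Return the PC to save in EPC/ErrorEPC/DEPC: the ISA mode bit is kept in
 * bit 0 and, for exceptions taken in a delay slot, the PC is wound back to
 * the branch itself. */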
target_ulong exception_resume_pc (CPUMIPSState *env)
{
    target_ulong bad_pc;
    target_ulong isa_mode;

    isa_mode = !!(env->hflags & MIPS_HFLAG_M16);
    bad_pc = env->active_tc.PC | isa_mode;
    if (env->hflags & MIPS_HFLAG_BMASK) {
        /* If the exception was raised from a delay slot, come back to
           the jump.  */
        bad_pc -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
    }

    return bad_pc;
}

#if !defined(CONFIG_USER_ONLY)
static void set_hflags_for_handler (CPUMIPSState *env)
{
    /* Exception handlers are entered in 32-bit mode.  */
    env->hflags &= ~(MIPS_HFLAG_M16);
    /* ...except that microMIPS lets you choose.  */
    if (env->insn_flags & ASE_MICROMIPS) {
        env->hflags |= (!!(env->CP0_Config3
                           & (1 << CP0C3_ISA_ON_EXC))
                        << MIPS_HFLAG_M16_SHIFT);
    }
}

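/* Latch the faulting instruction word into BadInstr and, for exceptions
 * taken in a branch delay slot, the preceding branch into BadInstrP, when
 * the corresponding Config3 bits advertise support. */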
static inline void set_badinstr_registers(CPUMIPSState *env)
{
    if (env->hflags & MIPS_HFLAG_M16) {
        /* TODO: add BadInstr support for microMIPS */
        return;
    }
    if (env->CP0_Config3 & (1 << CP0C3_BI)) {
        env->CP0_BadInstr = cpu_ldl_code(env, env->active_tc.PC);
    }
    if ((env->CP0_Config3 & (1 << CP0C3_BP)) &&
        (env->hflags & MIPS_HFLAG_BMASK)) {
        env->CP0_BadInstrP = cpu_ldl_code(env, env->active_tc.PC - 4);
    }
}
#endif

void mips_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    bool update_badinstr = 0;
    target_ulong offset;
    int cause = -1;
    const char *name;

    if (qemu_loglevel_mask(CPU_LOG_INT)
        && cs->exception_index != EXCP_EXT_INTERRUPT) {
        if (cs->exception_index < 0 || cs->exception_index > EXCP_LAST) {
            name = "unknown";
        } else {
            name = excp_names[cs->exception_index];
        }

        qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx
                 " %s exception\n",
                 __func__, env->active_tc.PC, env->CP0_EPC, name);
    }
    if (cs->exception_index == EXCP_EXT_INTERRUPT &&
        (env->hflags & MIPS_HFLAG_DM)) {
        cs->exception_index = EXCP_DINT;
    }
    offset = 0x180;
    switch (cs->exception_index) {
    case EXCP_DSS:
        env->CP0_Debug |= 1 << CP0DB_DSS;
        /* Debug single step cannot be raised inside a delay slot and
           resume will always occur on the next instruction
           (but we assume the pc has always been updated during
           code translation). */
        env->CP0_DEPC = env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16);
        goto enter_debug_mode;
    case EXCP_DINT:
        env->CP0_Debug |= 1 << CP0DB_DINT;
        goto set_DEPC;
    case EXCP_DIB:
        env->CP0_Debug |= 1 << CP0DB_DIB;
        goto set_DEPC;
    case EXCP_DBp:
        env->CP0_Debug |= 1 << CP0DB_DBp;
        /* Setup DExcCode - SDBBP instruction */
        env->CP0_Debug = (env->CP0_Debug & ~(0x1fULL << CP0DB_DEC)) | 9 << CP0DB_DEC;
        goto set_DEPC;
    case EXCP_DDBS:
        env->CP0_Debug |= 1 << CP0DB_DDBS;
        goto set_DEPC;
    case EXCP_DDBL:
        env->CP0_Debug |= 1 << CP0DB_DDBL;
    set_DEPC:
        env->CP0_DEPC = exception_resume_pc(env);
        env->hflags &= ~MIPS_HFLAG_BMASK;
 enter_debug_mode:
        if (env->insn_flags & ISA_MIPS3) {
            env->hflags |= MIPS_HFLAG_64;
            if (!(env->insn_flags & ISA_MIPS64R6) ||
                env->CP0_Status & (1 << CP0St_KX)) {
                env->hflags &= ~MIPS_HFLAG_AWRAP;
            }
        }
        env->hflags |= MIPS_HFLAG_DM | MIPS_HFLAG_CP0;
        env->hflags &= ~(MIPS_HFLAG_KSU);
        /* EJTAG probe trap enable is not implemented... */
        if (!(env->CP0_Status & (1 << CP0St_EXL)))
            env->CP0_Cause &= ~(1U << CP0Ca_BD);
        env->active_tc.PC = env->exception_base + 0x480;
        set_hflags_for_handler(env);
        break;
    case EXCP_RESET:
        cpu_reset(CPU(cpu));
        break;
    case EXCP_SRESET:
        env->CP0_Status |= (1 << CP0St_SR);
        memset(env->CP0_WatchLo, 0, sizeof(env->CP0_WatchLo));
        goto set_error_EPC;
    case EXCP_NMI:
        env->CP0_Status |= (1 << CP0St_NMI);
 set_error_EPC:
        env->CP0_ErrorEPC = exception_resume_pc(env);
        env->hflags &= ~MIPS_HFLAG_BMASK;
        env->CP0_Status |= (1 << CP0St_ERL) | (1 << CP0St_BEV);
        if (env->insn_flags & ISA_MIPS3) {
            env->hflags |= MIPS_HFLAG_64;
            if (!(env->insn_flags & ISA_MIPS64R6) ||
                env->CP0_Status & (1 << CP0St_KX)) {
                env->hflags &= ~MIPS_HFLAG_AWRAP;
            }
        }
        env->hflags |= MIPS_HFLAG_CP0;
        env->hflags &= ~(MIPS_HFLAG_KSU);
        if (!(env->CP0_Status & (1 << CP0St_EXL)))
            env->CP0_Cause &= ~(1U << CP0Ca_BD);
        env->active_tc.PC = env->exception_base;
        set_hflags_for_handler(env);
        break;
    case EXCP_EXT_INTERRUPT:
        cause = 0;
        if (env->CP0_Cause & (1 << CP0Ca_IV)) {
            uint32_t spacing = (env->CP0_IntCtl >> CP0IntCtl_VS) & 0x1f;

            if ((env->CP0_Status & (1 << CP0St_BEV)) || spacing == 0) {
                offset = 0x200;
            } else {
                uint32_t vector = 0;
                uint32_t pending = (env->CP0_Cause & CP0Ca_IP_mask) >> CP0Ca_IP;

                if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
                    /* For VEIC mode, the external interrupt controller feeds
                     * the vector through the CP0Cause IP lines.  */
                    vector = pending;
                } else {
                    /* Vectored Interrupts
                     * Mask with Status.IM7-IM0 to get enabled interrupts. */
                    pending &= (env->CP0_Status >> CP0St_IM) & 0xff;
                    /* Find the highest-priority interrupt. */
                    while (pending >>= 1) {
                        vector++;
                    }
                }
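                /* IntCtl.VS is the vector spacing in units of 32 bytes. */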
                offset = 0x200 + (vector * (spacing << 5));
            }
        }
        goto set_EPC;
    case EXCP_LTLBL:
        cause = 1;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        goto set_EPC;
    case EXCP_TLBL:
        cause = 2;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        if ((env->error_code & EXCP_TLB_NOMATCH) &&
            !(env->CP0_Status & (1 << CP0St_EXL))) {
#if defined(TARGET_MIPS64)
            int R = env->CP0_BadVAddr >> 62;
            int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
            int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;

            if ((R != 0 || UX) && (R != 3 || KX) &&
                (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
                offset = 0x080;
            } else {
#endif
                offset = 0x000;
#if defined(TARGET_MIPS64)
            }
#endif
        }
        goto set_EPC;
    case EXCP_TLBS:
        cause = 3;
        update_badinstr = 1;
        if ((env->error_code & EXCP_TLB_NOMATCH) &&
            !(env->CP0_Status & (1 << CP0St_EXL))) {
#if defined(TARGET_MIPS64)
            int R = env->CP0_BadVAddr >> 62;
            int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
            int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;

            if ((R != 0 || UX) && (R != 3 || KX) &&
                (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
                offset = 0x080;
            } else {
#endif
                offset = 0x000;
#if defined(TARGET_MIPS64)
            }
#endif
        }
        goto set_EPC;
    case EXCP_AdEL:
        cause = 4;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        goto set_EPC;
    case EXCP_AdES:
        cause = 5;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_IBE:
        cause = 6;
        goto set_EPC;
    case EXCP_DBE:
        cause = 7;
        goto set_EPC;
    case EXCP_SYSCALL:
        cause = 8;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_BREAK:
        cause = 9;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_RI:
        cause = 10;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_CpU:
        cause = 11;
        update_badinstr = 1;
        env->CP0_Cause = (env->CP0_Cause & ~(0x3 << CP0Ca_CE)) |
                         (env->error_code << CP0Ca_CE);
        goto set_EPC;
    case EXCP_OVERFLOW:
        cause = 12;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_TRAP:
        cause = 13;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_MSAFPE:
        cause = 14;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_FPE:
        cause = 15;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_C2E:
        cause = 18;
        goto set_EPC;
    case EXCP_TLBRI:
        cause = 19;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_TLBXI:
        cause = 20;
        goto set_EPC;
    case EXCP_MSADIS:
        cause = 21;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_MDMX:
        cause = 22;
        goto set_EPC;
    case EXCP_DWATCH:
        cause = 23;
        /* XXX: TODO: manage deferred watch exceptions */
        goto set_EPC;
    case EXCP_MCHECK:
        cause = 24;
        goto set_EPC;
    case EXCP_THREAD:
        cause = 25;
        goto set_EPC;
    case EXCP_DSPDIS:
        cause = 26;
        goto set_EPC;
    case EXCP_CACHE:
        cause = 30;
        offset = 0x100;
 set_EPC:
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            env->CP0_EPC = exception_resume_pc(env);
            if (update_badinstr) {
                set_badinstr_registers(env);
            }
            if (env->hflags & MIPS_HFLAG_BMASK) {
                env->CP0_Cause |= (1U << CP0Ca_BD);
            } else {
                env->CP0_Cause &= ~(1U << CP0Ca_BD);
            }
            env->CP0_Status |= (1 << CP0St_EXL);
            if (env->insn_flags & ISA_MIPS3) {
                env->hflags |= MIPS_HFLAG_64;
                if (!(env->insn_flags & ISA_MIPS64R6) ||
                    env->CP0_Status & (1 << CP0St_KX)) {
                    env->hflags &= ~MIPS_HFLAG_AWRAP;
                }
            }
            env->hflags |= MIPS_HFLAG_CP0;
            env->hflags &= ~(MIPS_HFLAG_KSU);
        }
        env->hflags &= ~MIPS_HFLAG_BMASK;
        if (env->CP0_Status & (1 << CP0St_BEV)) {
            env->active_tc.PC = env->exception_base + 0x200;
        } else if (cause == 30 && !(env->CP0_Config3 & (1 << CP0C3_SC) &&
                                    env->CP0_Config5 & (1 << CP0C5_CV))) {
            /* Force KSeg1 for cache errors */
            env->active_tc.PC = KSEG1_BASE | (env->CP0_EBase & 0x1FFFF000);
        } else {
            env->active_tc.PC = env->CP0_EBase & ~0xfff;
        }

        env->active_tc.PC += offset;
        set_hflags_for_handler(env);
        env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) | (cause << CP0Ca_EC);
        break;
    default:
        abort();
    }
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && cs->exception_index != EXCP_EXT_INTERRUPT) {
        qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n"
                 "    S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n",
                 __func__, env->active_tc.PC, env->CP0_EPC, cause,
                 env->CP0_Status, env->CP0_Cause, env->CP0_BadVAddr,
                 env->CP0_DEPC);
    }
#endif
    cs->exception_index = EXCP_NONE;
}

bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        MIPSCPU *cpu = MIPS_CPU(cs);
        CPUMIPSState *env = &cpu->env;

        if (cpu_mips_hw_interrupts_enabled(env) &&
            cpu_mips_hw_interrupts_pending(env)) {
            /* Raise it */
            cs->exception_index = EXCP_EXT_INTERRUPT;
            env->error_code = 0;
            mips_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

#if !defined(CONFIG_USER_ONLY)
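/* Invalidate one guest TLB entry: flush the pages it maps from QEMU's TLB,
 * or, for tlbwr with use_extra set, keep it as a hidden shadow entry so the
 * flush can be avoided as long as the guest cannot observe it. */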
void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra)
{
    MIPSCPU *cpu = mips_env_get_cpu(env);
    CPUState *cs;
    r4k_tlb_t *tlb;
    target_ulong addr;
    target_ulong end;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    target_ulong mask;

    tlb = &env->tlb->mmu.r4k.tlb[idx];
    /* The qemu TLB is flushed when the ASID changes, so no need to
       flush these entries again.  */
    if (tlb->G == 0 && tlb->ASID != ASID) {
        return;
    }

    if (use_extra && env->tlb->tlb_in_use < MIPS_TLB_MAX) {
        /* For tlbwr, we can shadow the discarded entry into
           a new (fake) TLB entry, as long as the guest can not
           tell that it's there.  */
        env->tlb->mmu.r4k.tlb[env->tlb->tlb_in_use] = *tlb;
        env->tlb->tlb_in_use++;
        return;
    }

    /* 1k pages are not supported. */
    mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
    if (tlb->V0) {
        cs = CPU(cpu);
        addr = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | (mask >> 1);
        while (addr < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
    if (tlb->V1) {
        cs = CPU(cpu);
        addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
#if defined(TARGET_MIPS64)
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | mask;
        while (addr - 1 < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
}
#endif

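/* Record the exception in the CPU state and exit the CPU loop, unwinding the
 * guest state using the host return address pc. */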
void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env,
                                          uint32_t exception,
                                          int error_code,
                                          uintptr_t pc)
{
    CPUState *cs = CPU(mips_env_get_cpu(env));

    if (exception < EXCP_SC) {
        qemu_log_mask(CPU_LOG_INT, "%s: %d %d\n",
                      __func__, exception, error_code);
    }
    cs->exception_index = exception;
    env->error_code = error_code;

    cpu_loop_exit_restore(cs, pc);
}