qemu/target/riscv/cpu_helper.c
   1/*
   2 * RISC-V CPU helpers for qemu.
   3 *
   4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
   5 * Copyright (c) 2017-2018 SiFive, Inc.
   6 *
   7 * This program is free software; you can redistribute it and/or modify it
   8 * under the terms and conditions of the GNU General Public License,
   9 * version 2 or later, as published by the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope it will be useful, but WITHOUT
  12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  14 * more details.
  15 *
  16 * You should have received a copy of the GNU General Public License along with
  17 * this program.  If not, see <http://www.gnu.org/licenses/>.
  18 */
  19
  20#include "qemu/osdep.h"
  21#include "qemu/log.h"
  22#include "qemu/main-loop.h"
  23#include "cpu.h"
  24#include "exec/exec-all.h"
  25#include "tcg/tcg-op.h"
  26#include "trace.h"
  27#include "semihosting/common-semi.h"
  28
  29int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
  30{
  31#ifdef CONFIG_USER_ONLY
  32    return 0;
  33#else
  34    return env->priv;
  35#endif
  36}
  37
  38void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
  39                          target_ulong *cs_base, uint32_t *pflags)
  40{
  41    CPUState *cs = env_cpu(env);
  42    RISCVCPU *cpu = RISCV_CPU(cs);
  43
  44    uint32_t flags = 0;
  45
  46    *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
  47    *cs_base = 0;
  48
  49    if (riscv_has_ext(env, RVV) || cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
  50        /*
   51         * If env->vl equals VLMAX, we can use the generic vector operation
   52         * expanders (GVEC) to accelerate the vector operations.
   53         * However, as LMUL could be a fractional number, the maximum
   54         * vector size that can be operated on might be less than 8 bytes,
   55         * which is not supported by GVEC. So we set the vl_eq_vlmax flag to
   56         * true only when maxsz >= 8 bytes.
  57         */
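        /*
         * Illustrative numbers (an assumption for this note, not taken from
         * the surrounding code): with VLEN = 128 and SEW = 32 (vsew = 2), an
         * LMUL of 1/4 gives vlmax = 1 and maxsz = 1 << 2 = 4 bytes, so
         * vl_eq_vlmax stays false even when vl == vlmax; with LMUL = 1,
         * vlmax = 4 and maxsz = 16 bytes, so the GVEC expanders can be used.
         */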
  58        uint32_t vlmax = vext_get_vlmax(env_archcpu(env), env->vtype);
  59        uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
  60        uint32_t maxsz = vlmax << sew;
  61        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
  62                           (maxsz >= 8);
  63        flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
  64        flags = FIELD_DP32(flags, TB_FLAGS, SEW, sew);
  65        flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
  66                    FIELD_EX64(env->vtype, VTYPE, VLMUL));
  67        flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
  68        flags = FIELD_DP32(flags, TB_FLAGS, VTA,
  69                    FIELD_EX64(env->vtype, VTYPE, VTA));
  70    } else {
  71        flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
  72    }
  73
  74#ifdef CONFIG_USER_ONLY
  75    flags |= TB_FLAGS_MSTATUS_FS;
  76    flags |= TB_FLAGS_MSTATUS_VS;
  77#else
  78    flags |= cpu_mmu_index(env, 0);
  79    if (riscv_cpu_fp_enabled(env)) {
  80        flags |= env->mstatus & MSTATUS_FS;
  81    }
  82
  83    if (riscv_cpu_vector_enabled(env)) {
  84        flags |= env->mstatus & MSTATUS_VS;
  85    }
  86
  87    if (riscv_has_ext(env, RVH)) {
  88        if (env->priv == PRV_M ||
  89            (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
  90            (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
  91                get_field(env->hstatus, HSTATUS_HU))) {
  92            flags = FIELD_DP32(flags, TB_FLAGS, HLSX, 1);
  93        }
  94
  95        flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_FS,
  96                           get_field(env->mstatus_hs, MSTATUS_FS));
  97
  98        flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_VS,
  99                           get_field(env->mstatus_hs, MSTATUS_VS));
 100    }
 101#endif
 102
 103    flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
 104    if (env->cur_pmmask < (env->xl == MXL_RV32 ? UINT32_MAX : UINT64_MAX)) {
 105        flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1);
 106    }
 107    if (env->cur_pmbase != 0) {
 108        flags = FIELD_DP32(flags, TB_FLAGS, PM_BASE_ENABLED, 1);
 109    }
 110
 111    *pflags = flags;
 112}
 113
 114void riscv_cpu_update_mask(CPURISCVState *env)
 115{
 116    target_ulong mask = -1, base = 0;
 117    /*
 118     * TODO: Current RVJ spec does not specify
 119     * how the extension interacts with XLEN.
 120     */
 121#ifndef CONFIG_USER_ONLY
 122    if (riscv_has_ext(env, RVJ)) {
 123        switch (env->priv) {
 124        case PRV_M:
 125            if (env->mmte & M_PM_ENABLE) {
 126                mask = env->mpmmask;
 127                base = env->mpmbase;
 128            }
 129            break;
 130        case PRV_S:
 131            if (env->mmte & S_PM_ENABLE) {
 132                mask = env->spmmask;
 133                base = env->spmbase;
 134            }
 135            break;
 136        case PRV_U:
 137            if (env->mmte & U_PM_ENABLE) {
 138                mask = env->upmmask;
 139                base = env->upmbase;
 140            }
 141            break;
 142        default:
 143            g_assert_not_reached();
 144        }
 145    }
 146#endif
 147    if (env->xl == MXL_RV32) {
 148        env->cur_pmmask = mask & UINT32_MAX;
 149        env->cur_pmbase = base & UINT32_MAX;
 150    } else {
 151        env->cur_pmmask = mask;
 152        env->cur_pmbase = base;
 153    }
 154}
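
/*
 * Sketch of how the cached values are meant to be consumed (assumed from the
 * pointer-masking handling in the translator, not something defined in this
 * file): an effective address is adjusted as
 *
 *     addr = (addr & env->cur_pmmask) | env->cur_pmbase;
 *
 * With pointer masking disabled, cur_pmmask is all-ones and cur_pmbase is
 * zero, so the adjustment is a no-op.
 */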
 155
 156#ifndef CONFIG_USER_ONLY
 157
 158/*
 159 * The HS-mode is allowed to configure priority only for the
 160 * following VS-mode local interrupts:
 161 *
 162 * 0  (Reserved interrupt, reads as zero)
 163 * 1  Supervisor software interrupt
 164 * 4  (Reserved interrupt, reads as zero)
 165 * 5  Supervisor timer interrupt
 166 * 8  (Reserved interrupt, reads as zero)
 167 * 13 (Reserved interrupt)
 168 * 14 "
 169 * 15 "
 170 * 16 "
 171 * 17 "
 172 * 18 "
 173 * 19 "
 174 * 20 "
 175 * 21 "
 176 * 22 "
 177 * 23 "
 178 */
 179
 180static const int hviprio_index2irq[] = {
 181    0, 1, 4, 5, 8, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 };
 182static const int hviprio_index2rdzero[] = {
 183    1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
 184
 185int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero)
 186{
 187    if (index < 0 || ARRAY_SIZE(hviprio_index2irq) <= index) {
 188        return -EINVAL;
 189    }
 190
 191    if (out_irq) {
 192        *out_irq = hviprio_index2irq[index];
 193    }
 194
 195    if (out_rdzero) {
 196        *out_rdzero = hviprio_index2rdzero[index];
 197    }
 198
 199    return 0;
 200}
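
/*
 * Usage sketch (hypothetical caller, for illustration only):
 *
 *     int irq, rdzero;
 *     if (!riscv_cpu_hviprio_index2irq(1, &irq, &rdzero)) {
 *         // irq == 1 (supervisor software interrupt), rdzero == 0
 *     }
 *
 * Index 0 maps to irq 0 with rdzero == 1, i.e. a reserved slot whose
 * priority reads as zero.
 */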
 201
 202/*
 203 * Default priorities of local interrupts are defined in the
 204 * RISC-V Advanced Interrupt Architecture specification.
 205 *
 206 * ----------------------------------------------------------------
 207 *  Default  |
 208 *  Priority | Major Interrupt Numbers
 209 * ----------------------------------------------------------------
 210 *  Highest  | 47, 23, 46, 45, 22, 44,
 211 *           | 43, 21, 42, 41, 20, 40
 212 *           |
 213 *           | 11 (0b),  3 (03),  7 (07)
 214 *           |  9 (09),  1 (01),  5 (05)
 215 *           | 12 (0c)
 216 *           | 10 (0a),  2 (02),  6 (06)
 217 *           |
 218 *           | 39, 19, 38, 37, 18, 36,
 219 *  Lowest   | 35, 17, 34, 33, 16, 32
 220 * ----------------------------------------------------------------
 221 */
 222static const uint8_t default_iprio[64] = {
 223 /* Custom interrupts 48 to 63 */
 224 [63] = IPRIO_MMAXIPRIO,
 225 [62] = IPRIO_MMAXIPRIO,
 226 [61] = IPRIO_MMAXIPRIO,
 227 [60] = IPRIO_MMAXIPRIO,
 228 [59] = IPRIO_MMAXIPRIO,
 229 [58] = IPRIO_MMAXIPRIO,
 230 [57] = IPRIO_MMAXIPRIO,
 231 [56] = IPRIO_MMAXIPRIO,
 232 [55] = IPRIO_MMAXIPRIO,
 233 [54] = IPRIO_MMAXIPRIO,
 234 [53] = IPRIO_MMAXIPRIO,
 235 [52] = IPRIO_MMAXIPRIO,
 236 [51] = IPRIO_MMAXIPRIO,
 237 [50] = IPRIO_MMAXIPRIO,
 238 [49] = IPRIO_MMAXIPRIO,
 239 [48] = IPRIO_MMAXIPRIO,
 240
 241 /* Custom interrupts 24 to 31 */
 242 [31] = IPRIO_MMAXIPRIO,
 243 [30] = IPRIO_MMAXIPRIO,
 244 [29] = IPRIO_MMAXIPRIO,
 245 [28] = IPRIO_MMAXIPRIO,
 246 [27] = IPRIO_MMAXIPRIO,
 247 [26] = IPRIO_MMAXIPRIO,
 248 [25] = IPRIO_MMAXIPRIO,
 249 [24] = IPRIO_MMAXIPRIO,
 250
 251 [47] = IPRIO_DEFAULT_UPPER,
 252 [23] = IPRIO_DEFAULT_UPPER + 1,
 253 [46] = IPRIO_DEFAULT_UPPER + 2,
 254 [45] = IPRIO_DEFAULT_UPPER + 3,
 255 [22] = IPRIO_DEFAULT_UPPER + 4,
 256 [44] = IPRIO_DEFAULT_UPPER + 5,
 257
 258 [43] = IPRIO_DEFAULT_UPPER + 6,
 259 [21] = IPRIO_DEFAULT_UPPER + 7,
 260 [42] = IPRIO_DEFAULT_UPPER + 8,
 261 [41] = IPRIO_DEFAULT_UPPER + 9,
 262 [20] = IPRIO_DEFAULT_UPPER + 10,
 263 [40] = IPRIO_DEFAULT_UPPER + 11,
 264
 265 [11] = IPRIO_DEFAULT_M,
 266 [3]  = IPRIO_DEFAULT_M + 1,
 267 [7]  = IPRIO_DEFAULT_M + 2,
 268
 269 [9]  = IPRIO_DEFAULT_S,
 270 [1]  = IPRIO_DEFAULT_S + 1,
 271 [5]  = IPRIO_DEFAULT_S + 2,
 272
 273 [12] = IPRIO_DEFAULT_SGEXT,
 274
 275 [10] = IPRIO_DEFAULT_VS,
 276 [2]  = IPRIO_DEFAULT_VS + 1,
 277 [6]  = IPRIO_DEFAULT_VS + 2,
 278
 279 [39] = IPRIO_DEFAULT_LOWER,
 280 [19] = IPRIO_DEFAULT_LOWER + 1,
 281 [38] = IPRIO_DEFAULT_LOWER + 2,
 282 [37] = IPRIO_DEFAULT_LOWER + 3,
 283 [18] = IPRIO_DEFAULT_LOWER + 4,
 284 [36] = IPRIO_DEFAULT_LOWER + 5,
 285
 286 [35] = IPRIO_DEFAULT_LOWER + 6,
 287 [17] = IPRIO_DEFAULT_LOWER + 7,
 288 [34] = IPRIO_DEFAULT_LOWER + 8,
 289 [33] = IPRIO_DEFAULT_LOWER + 9,
 290 [16] = IPRIO_DEFAULT_LOWER + 10,
 291 [32] = IPRIO_DEFAULT_LOWER + 11,
 292};
 293
 294uint8_t riscv_cpu_default_priority(int irq)
 295{
 296    if (irq < 0 || irq > 63) {
 297        return IPRIO_MMAXIPRIO;
 298    }
 299
 300    return default_iprio[irq] ? default_iprio[irq] : IPRIO_MMAXIPRIO;
  301}
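
/*
 * Example reads from the table above: riscv_cpu_default_priority(11) (machine
 * external interrupt) returns IPRIO_DEFAULT_M and riscv_cpu_default_priority(2)
 * (VS software interrupt) returns IPRIO_DEFAULT_VS + 1; a numerically smaller
 * value means a higher default priority. An irq with no entry in the table,
 * e.g. 0, falls back to IPRIO_MMAXIPRIO (lowest priority).
 */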
 302
 303static int riscv_cpu_pending_to_irq(CPURISCVState *env,
 304                                    int extirq, unsigned int extirq_def_prio,
 305                                    uint64_t pending, uint8_t *iprio)
 306{
 307    int irq, best_irq = RISCV_EXCP_NONE;
 308    unsigned int prio, best_prio = UINT_MAX;
 309
 310    if (!pending) {
 311        return RISCV_EXCP_NONE;
 312    }
 313
 314    irq = ctz64(pending);
 315    if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
 316        return irq;
 317    }
 318
 319    pending = pending >> irq;
 320    while (pending) {
 321        prio = iprio[irq];
 322        if (!prio) {
 323            if (irq == extirq) {
 324                prio = extirq_def_prio;
 325            } else {
 326                prio = (riscv_cpu_default_priority(irq) < extirq_def_prio) ?
 327                       1 : IPRIO_MMAXIPRIO;
 328            }
 329        }
 330        if ((pending & 0x1) && (prio <= best_prio)) {
 331            best_irq = irq;
 332            best_prio = prio;
 333        }
 334        irq++;
 335        pending = pending >> 1;
 336    }
 337
 338    return best_irq;
 339}
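
/*
 * Worked example (illustrative): with pending = MIP_SSIP | MIP_MTIP and no
 * AIA support, the function simply returns ctz64(pending), i.e. the lowest
 * set bit number. With AIA, every pending bit is weighted by its iprio[]
 * entry (or by riscv_cpu_default_priority() when that entry is zero) and the
 * numerically smallest priority wins, so a higher-numbered interrupt can be
 * chosen ahead of a lower-numbered one.
 */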
 340
 341uint64_t riscv_cpu_all_pending(CPURISCVState *env)
 342{
 343    uint32_t gein = get_field(env->hstatus, HSTATUS_VGEIN);
 344    uint64_t vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
 345
 346    return (env->mip | vsgein) & env->mie;
 347}
 348
 349int riscv_cpu_mirq_pending(CPURISCVState *env)
 350{
 351    uint64_t irqs = riscv_cpu_all_pending(env) & ~env->mideleg &
 352                    ~(MIP_SGEIP | MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
 353
 354    return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
 355                                    irqs, env->miprio);
 356}
 357
 358int riscv_cpu_sirq_pending(CPURISCVState *env)
 359{
 360    uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
 361                    ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
 362
 363    return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
 364                                    irqs, env->siprio);
 365}
 366
 367int riscv_cpu_vsirq_pending(CPURISCVState *env)
 368{
 369    uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
 370                    (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
 371
 372    return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
 373                                    irqs >> 1, env->hviprio);
 374}
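
/*
 * The "irqs >> 1" above reflects how VS-level interrupts are presented to the
 * guest: for example bit 2 (VS-level software interrupt) becomes bit 1
 * (supervisor software interrupt) from the guest's point of view, which is
 * also why riscv_cpu_local_irq_pending() below adds 1 back to the returned
 * virtual irq number.
 */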
 375
 376static int riscv_cpu_local_irq_pending(CPURISCVState *env)
 377{
 378    int virq;
 379    uint64_t irqs, pending, mie, hsie, vsie;
 380
 381    /* Determine interrupt enable state of all privilege modes */
 382    if (riscv_cpu_virt_enabled(env)) {
 383        mie = 1;
 384        hsie = 1;
 385        vsie = (env->priv < PRV_S) ||
 386               (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
 387    } else {
 388        mie = (env->priv < PRV_M) ||
 389              (env->priv == PRV_M && get_field(env->mstatus, MSTATUS_MIE));
 390        hsie = (env->priv < PRV_S) ||
 391               (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
 392        vsie = 0;
 393    }
 394
 395    /* Determine all pending interrupts */
 396    pending = riscv_cpu_all_pending(env);
 397
 398    /* Check M-mode interrupts */
 399    irqs = pending & ~env->mideleg & -mie;
 400    if (irqs) {
 401        return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
 402                                        irqs, env->miprio);
 403    }
 404
 405    /* Check HS-mode interrupts */
 406    irqs = pending & env->mideleg & ~env->hideleg & -hsie;
 407    if (irqs) {
 408        return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
 409                                        irqs, env->siprio);
 410    }
 411
 412    /* Check VS-mode interrupts */
 413    irqs = pending & env->mideleg & env->hideleg & -vsie;
 414    if (irqs) {
 415        virq = riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
 416                                        irqs >> 1, env->hviprio);
 417        return (virq <= 0) ? virq : virq + 1;
 418    }
 419
 420    /* Indicate no pending interrupt */
 421    return RISCV_EXCP_NONE;
 422}
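
/*
 * Note on the "& -mie" / "& -hsie" / "& -vsie" terms above: each enable is
 * computed as 0 or 1, so its negation is either 0 or an all-ones mask, e.g.
 *
 *     uint64_t mie = 1;
 *     uint64_t mask = -mie;    // 0xffffffffffffffff; 0 when mie == 0
 *
 * which enables or disables a whole interrupt class with a single AND.
 */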
 423
 424bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
 425{
 426    if (interrupt_request & CPU_INTERRUPT_HARD) {
 427        RISCVCPU *cpu = RISCV_CPU(cs);
 428        CPURISCVState *env = &cpu->env;
 429        int interruptno = riscv_cpu_local_irq_pending(env);
 430        if (interruptno >= 0) {
 431            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
 432            riscv_cpu_do_interrupt(cs);
 433            return true;
 434        }
 435    }
 436    return false;
 437}
 438
  439/* Return true if floating point support is currently enabled */
 440bool riscv_cpu_fp_enabled(CPURISCVState *env)
 441{
 442    if (env->mstatus & MSTATUS_FS) {
 443        if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_FS)) {
 444            return false;
 445        }
 446        return true;
 447    }
 448
 449    return false;
 450}
 451
  452/* Return true if vector support is currently enabled */
 453bool riscv_cpu_vector_enabled(CPURISCVState *env)
 454{
 455    if (env->mstatus & MSTATUS_VS) {
 456        if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_VS)) {
 457            return false;
 458        }
 459        return true;
 460    }
 461
 462    return false;
 463}
 464
 465void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
 466{
 467    uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM |
 468                            MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
 469                            MSTATUS64_UXL | MSTATUS_VS;
 470
 471    if (riscv_has_ext(env, RVF)) {
 472        mstatus_mask |= MSTATUS_FS;
 473    }
 474    bool current_virt = riscv_cpu_virt_enabled(env);
 475
 476    g_assert(riscv_has_ext(env, RVH));
 477
 478    if (current_virt) {
 479        /* Current V=1 and we are about to change to V=0 */
 480        env->vsstatus = env->mstatus & mstatus_mask;
 481        env->mstatus &= ~mstatus_mask;
 482        env->mstatus |= env->mstatus_hs;
 483
 484        env->vstvec = env->stvec;
 485        env->stvec = env->stvec_hs;
 486
 487        env->vsscratch = env->sscratch;
 488        env->sscratch = env->sscratch_hs;
 489
 490        env->vsepc = env->sepc;
 491        env->sepc = env->sepc_hs;
 492
 493        env->vscause = env->scause;
 494        env->scause = env->scause_hs;
 495
 496        env->vstval = env->stval;
 497        env->stval = env->stval_hs;
 498
 499        env->vsatp = env->satp;
 500        env->satp = env->satp_hs;
 501    } else {
 502        /* Current V=0 and we are about to change to V=1 */
 503        env->mstatus_hs = env->mstatus & mstatus_mask;
 504        env->mstatus &= ~mstatus_mask;
 505        env->mstatus |= env->vsstatus;
 506
 507        env->stvec_hs = env->stvec;
 508        env->stvec = env->vstvec;
 509
 510        env->sscratch_hs = env->sscratch;
 511        env->sscratch = env->vsscratch;
 512
 513        env->sepc_hs = env->sepc;
 514        env->sepc = env->vsepc;
 515
 516        env->scause_hs = env->scause;
 517        env->scause = env->vscause;
 518
 519        env->stval_hs = env->stval;
 520        env->stval = env->vstval;
 521
 522        env->satp_hs = env->satp;
 523        env->satp = env->vsatp;
 524    }
 525}
 526
 527target_ulong riscv_cpu_get_geilen(CPURISCVState *env)
 528{
 529    if (!riscv_has_ext(env, RVH)) {
 530        return 0;
 531    }
 532
 533    return env->geilen;
 534}
 535
 536void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
 537{
 538    if (!riscv_has_ext(env, RVH)) {
 539        return;
 540    }
 541
 542    if (geilen > (TARGET_LONG_BITS - 1)) {
 543        return;
 544    }
 545
 546    env->geilen = geilen;
 547}
 548
 549bool riscv_cpu_virt_enabled(CPURISCVState *env)
 550{
 551    if (!riscv_has_ext(env, RVH)) {
 552        return false;
 553    }
 554
 555    return get_field(env->virt, VIRT_ONOFF);
 556}
 557
 558void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
 559{
 560    if (!riscv_has_ext(env, RVH)) {
 561        return;
 562    }
 563
 564    /* Flush the TLB on all virt mode changes. */
 565    if (get_field(env->virt, VIRT_ONOFF) != enable) {
 566        tlb_flush(env_cpu(env));
 567    }
 568
 569    env->virt = set_field(env->virt, VIRT_ONOFF, enable);
 570
 571    if (enable) {
 572        /*
 573         * The guest external interrupts from an interrupt controller are
 574         * delivered only when the Guest/VM is running (i.e. V=1). This means
 575         * any guest external interrupt which is triggered while the Guest/VM
  576         * is not running (i.e. V=0) will be missed by QEMU, resulting in
  577         * sluggish responses to serial console input and other I/O events.
 578         *
 579         * To solve this, we check and inject interrupt after setting V=1.
 580         */
 581        riscv_cpu_update_mip(env_archcpu(env), 0, 0);
 582    }
 583}
 584
 585bool riscv_cpu_two_stage_lookup(int mmu_idx)
 586{
 587    return mmu_idx & TB_FLAGS_PRIV_HYP_ACCESS_MASK;
 588}
 589
 590int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
 591{
 592    CPURISCVState *env = &cpu->env;
 593    if (env->miclaim & interrupts) {
 594        return -1;
 595    } else {
 596        env->miclaim |= interrupts;
 597        return 0;
 598    }
 599}
 600
 601uint64_t riscv_cpu_update_mip(RISCVCPU *cpu, uint64_t mask, uint64_t value)
 602{
 603    CPURISCVState *env = &cpu->env;
 604    CPUState *cs = CPU(cpu);
 605    uint64_t gein, vsgein = 0, old = env->mip;
 606    bool locked = false;
 607
 608    if (riscv_cpu_virt_enabled(env)) {
 609        gein = get_field(env->hstatus, HSTATUS_VGEIN);
 610        vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
 611    }
 612
 613    if (!qemu_mutex_iothread_locked()) {
 614        locked = true;
 615        qemu_mutex_lock_iothread();
 616    }
 617
 618    env->mip = (env->mip & ~mask) | (value & mask);
 619
 620    if (env->mip | vsgein) {
 621        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
 622    } else {
 623        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
 624    }
 625
 626    if (locked) {
 627        qemu_mutex_unlock_iothread();
 628    }
 629
 630    return old;
 631}
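
/*
 * Usage sketch (hypothetical interrupt source such as a timer model, assuming
 * the BOOL_TO_MASK() helper from the RISC-V cpu headers):
 *
 *     riscv_cpu_update_mip(cpu, MIP_MTIP, BOOL_TO_MASK(level));
 *
 * raises or lowers the machine timer pending bit depending on 'level' and
 * returns the previous mip value.
 */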
 632
 633void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
 634                             void *arg)
 635{
 636    env->rdtime_fn = fn;
 637    env->rdtime_fn_arg = arg;
 638}
 639
 640void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
 641                                   int (*rmw_fn)(void *arg,
 642                                                 target_ulong reg,
 643                                                 target_ulong *val,
 644                                                 target_ulong new_val,
 645                                                 target_ulong write_mask),
 646                                   void *rmw_fn_arg)
 647{
 648    if (priv <= PRV_M) {
 649        env->aia_ireg_rmw_fn[priv] = rmw_fn;
 650        env->aia_ireg_rmw_fn_arg[priv] = rmw_fn_arg;
 651    }
 652}
 653
 654void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
 655{
 656    if (newpriv > PRV_M) {
 657        g_assert_not_reached();
 658    }
 659    if (newpriv == PRV_H) {
 660        newpriv = PRV_U;
 661    }
 662    /* tlb_flush is unnecessary as mode is contained in mmu_idx */
 663    env->priv = newpriv;
 664    env->xl = cpu_recompute_xl(env);
 665    riscv_cpu_update_mask(env);
 666
 667    /*
 668     * Clear the load reservation - otherwise a reservation placed in one
 669     * context/process can be used by another, resulting in an SC succeeding
 670     * incorrectly. Version 2.2 of the ISA specification explicitly requires
 671     * this behaviour, while later revisions say that the kernel "should" use
 672     * an SC instruction to force the yielding of a load reservation on a
 673     * preemptive context switch. As a result, do both.
 674     */
 675    env->load_res = -1;
 676}
 677
 678/*
 679 * get_physical_address_pmp - check PMP permission for this physical address
 680 *
  681 * Match the PMP region and check permission for this physical address and its
  682 * TLB page. Returns 0 if the permission checking was successful.
 683 *
 684 * @env: CPURISCVState
 685 * @prot: The returned protection attributes
  686 * @tlb_size: TLB page size containing addr. It could be modified after PMP
  687 *            permission checking. NULL if no TLB page should be set for addr.
  688 * @addr: The physical address whose permission is to be checked
 689 * @access_type: The type of MMU access
 690 * @mode: Indicates current privilege level.
 691 */
 692static int get_physical_address_pmp(CPURISCVState *env, int *prot,
 693                                    target_ulong *tlb_size, hwaddr addr,
 694                                    int size, MMUAccessType access_type,
 695                                    int mode)
 696{
 697    pmp_priv_t pmp_priv;
 698    target_ulong tlb_size_pmp = 0;
 699
 700    if (!riscv_feature(env, RISCV_FEATURE_PMP)) {
 701        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
 702        return TRANSLATE_SUCCESS;
 703    }
 704
 705    if (!pmp_hart_has_privs(env, addr, size, 1 << access_type, &pmp_priv,
 706                            mode)) {
 707        *prot = 0;
 708        return TRANSLATE_PMP_FAIL;
 709    }
 710
 711    *prot = pmp_priv_to_page_prot(pmp_priv);
 712    if (tlb_size != NULL) {
 713        if (pmp_is_range_in_tlb(env, addr & ~(*tlb_size - 1), &tlb_size_pmp)) {
 714            *tlb_size = tlb_size_pmp;
 715        }
 716    }
 717
 718    return TRANSLATE_SUCCESS;
 719}
 720
 721/* get_physical_address - get the physical address for this virtual address
 722 *
 723 * Do a page table walk to obtain the physical address corresponding to a
 724 * virtual address. Returns 0 if the translation was successful
 725 *
 726 * Adapted from Spike's mmu_t::translate and mmu_t::walk
 727 *
 728 * @env: CPURISCVState
 729 * @physical: This will be set to the calculated physical address
 730 * @prot: The returned protection attributes
 731 * @addr: The virtual address to be translated
  732 * @fault_pte_addr: If not NULL, this will be set to the fault PTE address
  733 *                  when an error occurs during PTE address translation.
 734 *                  This will already be shifted to match htval.
 735 * @access_type: The type of MMU access
 736 * @mmu_idx: Indicates current privilege level
 737 * @first_stage: Are we in first stage translation?
 738 *               Second stage is used for hypervisor guest translation
 739 * @two_stage: Are we going to perform two stage translation
 740 * @is_debug: Is this access from a debugger or the monitor?
 741 */
 742static int get_physical_address(CPURISCVState *env, hwaddr *physical,
 743                                int *prot, target_ulong addr,
 744                                target_ulong *fault_pte_addr,
 745                                int access_type, int mmu_idx,
 746                                bool first_stage, bool two_stage,
 747                                bool is_debug)
 748{
 749    /* NOTE: the env->pc value visible here will not be
 750     * correct, but the value visible to the exception handler
 751     * (riscv_cpu_do_interrupt) is correct */
 752    MemTxResult res;
 753    MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
 754    int mode = mmu_idx & TB_FLAGS_PRIV_MMU_MASK;
 755    bool use_background = false;
 756    hwaddr ppn;
 757    RISCVCPU *cpu = env_archcpu(env);
 758    int napot_bits = 0;
 759    target_ulong napot_mask;
 760
 761    /*
 762     * Check if we should use the background registers for the two
 763     * stage translation. We don't need to check if we actually need
 764     * two stage translation as that happened before this function
 765     * was called. Background registers will be used if the guest has
 766     * forced a two stage translation to be on (in HS or M mode).
 767     */
 768    if (!riscv_cpu_virt_enabled(env) && two_stage) {
 769        use_background = true;
 770    }
 771
 772    /* MPRV does not affect the virtual-machine load/store
 773       instructions, HLV, HLVX, and HSV. */
 774    if (riscv_cpu_two_stage_lookup(mmu_idx)) {
 775        mode = get_field(env->hstatus, HSTATUS_SPVP);
 776    } else if (mode == PRV_M && access_type != MMU_INST_FETCH) {
 777        if (get_field(env->mstatus, MSTATUS_MPRV)) {
 778            mode = get_field(env->mstatus, MSTATUS_MPP);
 779        }
 780    }
 781
 782    if (first_stage == false) {
 783        /* We are in stage 2 translation, this is similar to stage 1. */
 784        /* Stage 2 is always taken as U-mode */
 785        mode = PRV_U;
 786    }
 787
 788    if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
 789        *physical = addr;
 790        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
 791        return TRANSLATE_SUCCESS;
 792    }
 793
 794    *prot = 0;
 795
 796    hwaddr base;
 797    int levels, ptidxbits, ptesize, vm, sum, mxr, widened;
 798
 799    if (first_stage == true) {
 800        mxr = get_field(env->mstatus, MSTATUS_MXR);
 801    } else {
 802        mxr = get_field(env->vsstatus, MSTATUS_MXR);
 803    }
 804
 805    if (first_stage == true) {
 806        if (use_background) {
 807            if (riscv_cpu_mxl(env) == MXL_RV32) {
 808                base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT;
 809                vm = get_field(env->vsatp, SATP32_MODE);
 810            } else {
 811                base = (hwaddr)get_field(env->vsatp, SATP64_PPN) << PGSHIFT;
 812                vm = get_field(env->vsatp, SATP64_MODE);
 813            }
 814        } else {
 815            if (riscv_cpu_mxl(env) == MXL_RV32) {
 816                base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT;
 817                vm = get_field(env->satp, SATP32_MODE);
 818            } else {
 819                base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT;
 820                vm = get_field(env->satp, SATP64_MODE);
 821            }
 822        }
 823        widened = 0;
 824    } else {
 825        if (riscv_cpu_mxl(env) == MXL_RV32) {
 826            base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT;
 827            vm = get_field(env->hgatp, SATP32_MODE);
 828        } else {
 829            base = (hwaddr)get_field(env->hgatp, SATP64_PPN) << PGSHIFT;
 830            vm = get_field(env->hgatp, SATP64_MODE);
 831        }
 832        widened = 2;
 833    }
  834    /* status.SUM is ignored for background (VS-stage) walks and debug accesses */
 835    sum = get_field(env->mstatus, MSTATUS_SUM) || use_background || is_debug;
 836    switch (vm) {
 837    case VM_1_10_SV32:
 838      levels = 2; ptidxbits = 10; ptesize = 4; break;
 839    case VM_1_10_SV39:
 840      levels = 3; ptidxbits = 9; ptesize = 8; break;
 841    case VM_1_10_SV48:
 842      levels = 4; ptidxbits = 9; ptesize = 8; break;
 843    case VM_1_10_SV57:
 844      levels = 5; ptidxbits = 9; ptesize = 8; break;
 845    case VM_1_10_MBARE:
 846        *physical = addr;
 847        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
 848        return TRANSLATE_SUCCESS;
 849    default:
 850      g_assert_not_reached();
 851    }
 852
 853    CPUState *cs = env_cpu(env);
 854    int va_bits = PGSHIFT + levels * ptidxbits + widened;
 855    target_ulong mask, masked_msbs;
 856
 857    if (TARGET_LONG_BITS > (va_bits - 1)) {
 858        mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
 859    } else {
 860        mask = 0;
 861    }
 862    masked_msbs = (addr >> (va_bits - 1)) & mask;
 863
 864    if (masked_msbs != 0 && masked_msbs != mask) {
 865        return TRANSLATE_FAIL;
 866    }
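
    /*
     * Example, assuming Sv39 (levels = 3, ptidxbits = 9, widened = 0):
     * va_bits = 12 + 3 * 9 = 39, so mask covers address bits 63..38 and a
     * legal virtual address must have those bits all zero (masked_msbs == 0)
     * or all one (masked_msbs == mask), i.e. be properly sign-extended.
     */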
 867
 868    int ptshift = (levels - 1) * ptidxbits;
 869    int i;
 870
 871#if !TCG_OVERSIZED_GUEST
 872restart:
 873#endif
 874    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
 875        target_ulong idx;
 876        if (i == 0) {
 877            idx = (addr >> (PGSHIFT + ptshift)) &
 878                           ((1 << (ptidxbits + widened)) - 1);
 879        } else {
 880            idx = (addr >> (PGSHIFT + ptshift)) &
 881                           ((1 << ptidxbits) - 1);
 882        }
 883
 884        /* check that physical address of PTE is legal */
 885        hwaddr pte_addr;
 886
 887        if (two_stage && first_stage) {
 888            int vbase_prot;
 889            hwaddr vbase;
 890
 891            /* Do the second stage translation on the base PTE address. */
 892            int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
 893                                                 base, NULL, MMU_DATA_LOAD,
 894                                                 mmu_idx, false, true,
 895                                                 is_debug);
 896
 897            if (vbase_ret != TRANSLATE_SUCCESS) {
 898                if (fault_pte_addr) {
 899                    *fault_pte_addr = (base + idx * ptesize) >> 2;
 900                }
 901                return TRANSLATE_G_STAGE_FAIL;
 902            }
 903
 904            pte_addr = vbase + idx * ptesize;
 905        } else {
 906            pte_addr = base + idx * ptesize;
 907        }
 908
 909        int pmp_prot;
 910        int pmp_ret = get_physical_address_pmp(env, &pmp_prot, NULL, pte_addr,
 911                                               sizeof(target_ulong),
 912                                               MMU_DATA_LOAD, PRV_S);
 913        if (pmp_ret != TRANSLATE_SUCCESS) {
 914            return TRANSLATE_PMP_FAIL;
 915        }
 916
 917        target_ulong pte;
 918        if (riscv_cpu_mxl(env) == MXL_RV32) {
 919            pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
 920        } else {
 921            pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
 922        }
 923
 924        if (res != MEMTX_OK) {
 925            return TRANSLATE_FAIL;
 926        }
 927
 928        if (riscv_cpu_sxl(env) == MXL_RV32) {
 929            ppn = pte >> PTE_PPN_SHIFT;
 930        } else if (cpu->cfg.ext_svpbmt || cpu->cfg.ext_svnapot) {
 931            ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT;
 932        } else {
 933            ppn = pte >> PTE_PPN_SHIFT;
 934            if ((pte & ~(target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT) {
 935                return TRANSLATE_FAIL;
 936            }
 937        }
 938
 939        if (!(pte & PTE_V)) {
 940            /* Invalid PTE */
 941            return TRANSLATE_FAIL;
 942        } else if (!cpu->cfg.ext_svpbmt && (pte & PTE_PBMT)) {
 943            return TRANSLATE_FAIL;
 944        } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
 945            /* Inner PTE, continue walking */
 946            if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) {
 947                return TRANSLATE_FAIL;
 948            }
 949            base = ppn << PGSHIFT;
 950        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
 951            /* Reserved leaf PTE flags: PTE_W */
 952            return TRANSLATE_FAIL;
 953        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
 954            /* Reserved leaf PTE flags: PTE_W + PTE_X */
 955            return TRANSLATE_FAIL;
 956        } else if ((pte & PTE_U) && ((mode != PRV_U) &&
 957                   (!sum || access_type == MMU_INST_FETCH))) {
 958            /* User PTE flags when not U mode and mstatus.SUM is not set,
 959               or the access type is an instruction fetch */
 960            return TRANSLATE_FAIL;
 961        } else if (!(pte & PTE_U) && (mode != PRV_S)) {
 962            /* Supervisor PTE flags when not S mode */
 963            return TRANSLATE_FAIL;
 964        } else if (ppn & ((1ULL << ptshift) - 1)) {
 965            /* Misaligned PPN */
 966            return TRANSLATE_FAIL;
 967        } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
 968                   ((pte & PTE_X) && mxr))) {
 969            /* Read access check failed */
 970            return TRANSLATE_FAIL;
 971        } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
 972            /* Write access check failed */
 973            return TRANSLATE_FAIL;
 974        } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
 975            /* Fetch access check failed */
 976            return TRANSLATE_FAIL;
 977        } else {
 978            /* if necessary, set accessed and dirty bits. */
 979            target_ulong updated_pte = pte | PTE_A |
 980                (access_type == MMU_DATA_STORE ? PTE_D : 0);
 981
 982            /* Page table updates need to be atomic with MTTCG enabled */
 983            if (updated_pte != pte) {
 984                /*
 985                 * - if accessed or dirty bits need updating, and the PTE is
 986                 *   in RAM, then we do so atomically with a compare and swap.
 987                 * - if the PTE is in IO space or ROM, then it can't be updated
 988                 *   and we return TRANSLATE_FAIL.
 989                 * - if the PTE changed by the time we went to update it, then
 990                 *   it is no longer valid and we must re-walk the page table.
 991                 */
 992                MemoryRegion *mr;
 993                hwaddr l = sizeof(target_ulong), addr1;
 994                mr = address_space_translate(cs->as, pte_addr,
 995                    &addr1, &l, false, MEMTXATTRS_UNSPECIFIED);
 996                if (memory_region_is_ram(mr)) {
 997                    target_ulong *pte_pa =
 998                        qemu_map_ram_ptr(mr->ram_block, addr1);
 999#if TCG_OVERSIZED_GUEST
1000                    /* MTTCG is not enabled on oversized TCG guests so
1001                     * page table updates do not need to be atomic */
1002                    *pte_pa = pte = updated_pte;
1003#else
1004                    target_ulong old_pte =
1005                        qatomic_cmpxchg(pte_pa, pte, updated_pte);
1006                    if (old_pte != pte) {
1007                        goto restart;
1008                    } else {
1009                        pte = updated_pte;
1010                    }
1011#endif
1012                } else {
1013                    /* misconfigured PTE in ROM (AD bits are not preset) or
1014                     * PTE is in IO space and can't be updated atomically */
1015                    return TRANSLATE_FAIL;
1016                }
1017            }
1018
1019            /* for superpage mappings, make a fake leaf PTE for the TLB's
1020               benefit. */
1021            target_ulong vpn = addr >> PGSHIFT;
1022
1023            if (cpu->cfg.ext_svnapot && (pte & PTE_N)) {
1024                napot_bits = ctzl(ppn) + 1;
1025                if ((i != (levels - 1)) || (napot_bits != 4)) {
1026                    return TRANSLATE_FAIL;
1027                }
1028            }
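            /*
             * Illustrative case: with Svnapot, a 64 KiB NAPOT mapping is a
             * leaf PTE whose four low PPN bits are 0b1000, so ctzl(ppn) + 1
             * equals 4; napot_mask below then blends those low PPN bits with
             * the corresponding virtual page number bits.
             */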
1029
1030            napot_mask = (1 << napot_bits) - 1;
1031            *physical = (((ppn & ~napot_mask) | (vpn & napot_mask) |
1032                          (vpn & (((target_ulong)1 << ptshift) - 1))
1033                         ) << PGSHIFT) | (addr & ~TARGET_PAGE_MASK);
1034
1035            /* set permissions on the TLB entry */
1036            if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
1037                *prot |= PAGE_READ;
1038            }
1039            if ((pte & PTE_X)) {
1040                *prot |= PAGE_EXEC;
1041            }
1042            /* add write permission on stores or if the page is already dirty,
1043               so that we TLB miss on later writes to update the dirty bit */
1044            if ((pte & PTE_W) &&
1045                    (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
1046                *prot |= PAGE_WRITE;
1047            }
1048            return TRANSLATE_SUCCESS;
1049        }
1050    }
1051    return TRANSLATE_FAIL;
1052}
1053
1054static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
1055                                MMUAccessType access_type, bool pmp_violation,
1056                                bool first_stage, bool two_stage)
1057{
1058    CPUState *cs = env_cpu(env);
1059    int page_fault_exceptions, vm;
 1060    uint64_t satp_mode;
 1061
 1062    if (riscv_cpu_mxl(env) == MXL_RV32) {
 1063        satp_mode = SATP32_MODE;
 1064    } else {
 1065        satp_mode = SATP64_MODE;
 1066    }
 1067
 1068    if (first_stage) {
 1069        vm = get_field(env->satp, satp_mode);
 1070    } else {
 1071        vm = get_field(env->hgatp, satp_mode);
1072    }
1073
1074    page_fault_exceptions = vm != VM_1_10_MBARE && !pmp_violation;
1075
1076    switch (access_type) {
1077    case MMU_INST_FETCH:
1078        if (riscv_cpu_virt_enabled(env) && !first_stage) {
1079            cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
1080        } else {
1081            cs->exception_index = page_fault_exceptions ?
1082                RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
1083        }
1084        break;
1085    case MMU_DATA_LOAD:
1086        if (two_stage && !first_stage) {
1087            cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
1088        } else {
1089            cs->exception_index = page_fault_exceptions ?
1090                RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
1091        }
1092        break;
1093    case MMU_DATA_STORE:
1094        if (two_stage && !first_stage) {
1095            cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
1096        } else {
1097            cs->exception_index = page_fault_exceptions ?
1098                RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
1099        }
1100        break;
1101    default:
1102        g_assert_not_reached();
1103    }
1104    env->badaddr = address;
1105    env->two_stage_lookup = two_stage;
1106}
1107
1108hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
1109{
1110    RISCVCPU *cpu = RISCV_CPU(cs);
1111    CPURISCVState *env = &cpu->env;
1112    hwaddr phys_addr;
1113    int prot;
1114    int mmu_idx = cpu_mmu_index(&cpu->env, false);
1115
1116    if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
1117                             true, riscv_cpu_virt_enabled(env), true)) {
1118        return -1;
1119    }
1120
1121    if (riscv_cpu_virt_enabled(env)) {
1122        if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
1123                                 0, mmu_idx, false, true, true)) {
1124            return -1;
1125        }
1126    }
1127
1128    return phys_addr & TARGET_PAGE_MASK;
1129}
1130
1131void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
1132                                     vaddr addr, unsigned size,
1133                                     MMUAccessType access_type,
1134                                     int mmu_idx, MemTxAttrs attrs,
1135                                     MemTxResult response, uintptr_t retaddr)
1136{
1137    RISCVCPU *cpu = RISCV_CPU(cs);
1138    CPURISCVState *env = &cpu->env;
1139
1140    if (access_type == MMU_DATA_STORE) {
1141        cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
1142    } else if (access_type == MMU_DATA_LOAD) {
1143        cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
1144    } else {
1145        cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
1146    }
1147
1148    env->badaddr = addr;
1149    env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
1150                            riscv_cpu_two_stage_lookup(mmu_idx);
1151    cpu_loop_exit_restore(cs, retaddr);
1152}
1153
1154void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
1155                                   MMUAccessType access_type, int mmu_idx,
1156                                   uintptr_t retaddr)
1157{
1158    RISCVCPU *cpu = RISCV_CPU(cs);
1159    CPURISCVState *env = &cpu->env;
1160    switch (access_type) {
1161    case MMU_INST_FETCH:
1162        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
1163        break;
1164    case MMU_DATA_LOAD:
1165        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
1166        break;
1167    case MMU_DATA_STORE:
1168        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
1169        break;
1170    default:
1171        g_assert_not_reached();
1172    }
1173    env->badaddr = addr;
1174    env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
1175                            riscv_cpu_two_stage_lookup(mmu_idx);
1176    cpu_loop_exit_restore(cs, retaddr);
1177}
1178
1179bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
1180                        MMUAccessType access_type, int mmu_idx,
1181                        bool probe, uintptr_t retaddr)
1182{
1183    RISCVCPU *cpu = RISCV_CPU(cs);
1184    CPURISCVState *env = &cpu->env;
1185    vaddr im_address;
1186    hwaddr pa = 0;
1187    int prot, prot2, prot_pmp;
1188    bool pmp_violation = false;
1189    bool first_stage_error = true;
1190    bool two_stage_lookup = false;
1191    int ret = TRANSLATE_FAIL;
1192    int mode = mmu_idx;
1193    /* default TLB page size */
1194    target_ulong tlb_size = TARGET_PAGE_SIZE;
1195
1196    env->guest_phys_fault_addr = 0;
1197
1198    qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
1199                  __func__, address, access_type, mmu_idx);
1200
1201    /* MPRV does not affect the virtual-machine load/store
1202       instructions, HLV, HLVX, and HSV. */
1203    if (riscv_cpu_two_stage_lookup(mmu_idx)) {
1204        mode = get_field(env->hstatus, HSTATUS_SPVP);
1205    } else if (mode == PRV_M && access_type != MMU_INST_FETCH &&
1206               get_field(env->mstatus, MSTATUS_MPRV)) {
1207        mode = get_field(env->mstatus, MSTATUS_MPP);
1208        if (riscv_has_ext(env, RVH) && get_field(env->mstatus, MSTATUS_MPV)) {
1209            two_stage_lookup = true;
1210        }
1211    }
1212
1213    if (riscv_cpu_virt_enabled(env) ||
1214        ((riscv_cpu_two_stage_lookup(mmu_idx) || two_stage_lookup) &&
1215         access_type != MMU_INST_FETCH)) {
1216        /* Two stage lookup */
1217        ret = get_physical_address(env, &pa, &prot, address,
1218                                   &env->guest_phys_fault_addr, access_type,
1219                                   mmu_idx, true, true, false);
1220
1221        /*
 1222         * A G-stage exception may be triggered during two-stage lookup.
1223         * And the env->guest_phys_fault_addr has already been set in
1224         * get_physical_address().
1225         */
1226        if (ret == TRANSLATE_G_STAGE_FAIL) {
1227            first_stage_error = false;
1228            access_type = MMU_DATA_LOAD;
1229        }
1230
1231        qemu_log_mask(CPU_LOG_MMU,
1232                      "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
1233                      TARGET_FMT_plx " prot %d\n",
1234                      __func__, address, ret, pa, prot);
1235
1236        if (ret == TRANSLATE_SUCCESS) {
1237            /* Second stage lookup */
1238            im_address = pa;
1239
1240            ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
1241                                       access_type, mmu_idx, false, true,
1242                                       false);
1243
1244            qemu_log_mask(CPU_LOG_MMU,
1245                    "%s 2nd-stage address=%" VADDR_PRIx " ret %d physical "
1246                    TARGET_FMT_plx " prot %d\n",
1247                    __func__, im_address, ret, pa, prot2);
1248
1249            prot &= prot2;
1250
1251            if (ret == TRANSLATE_SUCCESS) {
1252                ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
1253                                               size, access_type, mode);
1254
1255                qemu_log_mask(CPU_LOG_MMU,
1256                              "%s PMP address=" TARGET_FMT_plx " ret %d prot"
1257                              " %d tlb_size " TARGET_FMT_lu "\n",
1258                              __func__, pa, ret, prot_pmp, tlb_size);
1259
1260                prot &= prot_pmp;
1261            }
1262
1263            if (ret != TRANSLATE_SUCCESS) {
1264                /*
1265                 * Guest physical address translation failed, this is a HS
1266                 * level exception
1267                 */
1268                first_stage_error = false;
1269                env->guest_phys_fault_addr = (im_address |
1270                                              (address &
1271                                               (TARGET_PAGE_SIZE - 1))) >> 2;
1272            }
1273        }
1274    } else {
1275        /* Single stage lookup */
1276        ret = get_physical_address(env, &pa, &prot, address, NULL,
1277                                   access_type, mmu_idx, true, false, false);
1278
1279        qemu_log_mask(CPU_LOG_MMU,
1280                      "%s address=%" VADDR_PRIx " ret %d physical "
1281                      TARGET_FMT_plx " prot %d\n",
1282                      __func__, address, ret, pa, prot);
1283
1284        if (ret == TRANSLATE_SUCCESS) {
1285            ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
1286                                           size, access_type, mode);
1287
1288            qemu_log_mask(CPU_LOG_MMU,
1289                          "%s PMP address=" TARGET_FMT_plx " ret %d prot"
1290                          " %d tlb_size " TARGET_FMT_lu "\n",
1291                          __func__, pa, ret, prot_pmp, tlb_size);
1292
1293            prot &= prot_pmp;
1294        }
1295    }
1296
1297    if (ret == TRANSLATE_PMP_FAIL) {
1298        pmp_violation = true;
1299    }
1300
1301    if (ret == TRANSLATE_SUCCESS) {
1302        tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
1303                     prot, mmu_idx, tlb_size);
1304        return true;
1305    } else if (probe) {
1306        return false;
1307    } else {
1308        raise_mmu_exception(env, address, access_type, pmp_violation,
1309                            first_stage_error,
1310                            riscv_cpu_virt_enabled(env) ||
1311                                riscv_cpu_two_stage_lookup(mmu_idx));
1312        cpu_loop_exit_restore(cs, retaddr);
1313    }
1314
1315    return true;
1316}
1317#endif /* !CONFIG_USER_ONLY */
1318
1319/*
1320 * Handle Traps
1321 *
1322 * Adapted from Spike's processor_t::take_trap.
1323 *
1324 */
1325void riscv_cpu_do_interrupt(CPUState *cs)
1326{
1327#if !defined(CONFIG_USER_ONLY)
1328
1329    RISCVCPU *cpu = RISCV_CPU(cs);
1330    CPURISCVState *env = &cpu->env;
1331    bool write_gva = false;
1332    uint64_t s;
1333
 1334    /* cs->exception_index is 32 bits wide unlike mcause which is XLEN bits
 1335     * wide, so we mask off the MSB and separate into trap type and cause.
 1336     */
1337    bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
1338    target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
1339    uint64_t deleg = async ? env->mideleg : env->medeleg;
1340    target_ulong tval = 0;
1341    target_ulong htval = 0;
1342    target_ulong mtval2 = 0;
1343
 1344    if (cause == RISCV_EXCP_SEMIHOST) {
1345        if (env->priv >= PRV_S) {
1346            do_common_semihosting(cs);
1347            env->pc += 4;
1348            return;
1349        }
1350        cause = RISCV_EXCP_BREAKPOINT;
1351    }
1352
1353    if (!async) {
1354        /* set tval to badaddr for traps with address information */
1355        switch (cause) {
1356        case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
1357        case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
1358        case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
1359        case RISCV_EXCP_INST_ADDR_MIS:
1360        case RISCV_EXCP_INST_ACCESS_FAULT:
1361        case RISCV_EXCP_LOAD_ADDR_MIS:
1362        case RISCV_EXCP_STORE_AMO_ADDR_MIS:
1363        case RISCV_EXCP_LOAD_ACCESS_FAULT:
1364        case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
1365        case RISCV_EXCP_INST_PAGE_FAULT:
1366        case RISCV_EXCP_LOAD_PAGE_FAULT:
1367        case RISCV_EXCP_STORE_PAGE_FAULT:
1368            write_gva = env->two_stage_lookup;
1369            tval = env->badaddr;
1370            break;
1371        case RISCV_EXCP_ILLEGAL_INST:
1372        case RISCV_EXCP_VIRT_INSTRUCTION_FAULT:
1373            tval = env->bins;
1374            break;
1375        default:
1376            break;
1377        }
1378        /* ecall is dispatched as one cause so translate based on mode */
1379        if (cause == RISCV_EXCP_U_ECALL) {
1380            assert(env->priv <= 3);
1381
1382            if (env->priv == PRV_M) {
1383                cause = RISCV_EXCP_M_ECALL;
1384            } else if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) {
1385                cause = RISCV_EXCP_VS_ECALL;
1386            } else if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) {
1387                cause = RISCV_EXCP_S_ECALL;
1388            } else if (env->priv == PRV_U) {
1389                cause = RISCV_EXCP_U_ECALL;
1390            }
1391        }
1392    }
1393
1394    trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
1395                     riscv_cpu_get_trap_name(cause, async));
1396
1397    qemu_log_mask(CPU_LOG_INT,
1398                  "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
1399                  "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
1400                  __func__, env->mhartid, async, cause, env->pc, tval,
1401                  riscv_cpu_get_trap_name(cause, async));
1402
1403    if (env->priv <= PRV_S &&
1404            cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
1405        /* handle the trap in S-mode */
1406        if (riscv_has_ext(env, RVH)) {
1407            uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
1408
1409            if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1)) {
1410                /* Trap to VS mode */
1411                /*
 1412                 * See if we need to adjust cause: yes if it is a VS-mode interrupt,
 1413                 * no if the hypervisor has delegated one of the HS-mode interrupts.
1414                 */
1415                if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
1416                    cause == IRQ_VS_EXT) {
1417                    cause = cause - 1;
1418                }
1419                write_gva = false;
1420            } else if (riscv_cpu_virt_enabled(env)) {
1421                /* Trap into HS mode, from virt */
1422                riscv_cpu_swap_hypervisor_regs(env);
1423                env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
1424                                         env->priv);
1425                env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
1426                                         riscv_cpu_virt_enabled(env));
1427
1428
1429                htval = env->guest_phys_fault_addr;
1430
1431                riscv_cpu_set_virt_enabled(env, 0);
1432            } else {
1433                /* Trap into HS mode */
1434                env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
1435                htval = env->guest_phys_fault_addr;
1436            }
1437            env->hstatus = set_field(env->hstatus, HSTATUS_GVA, write_gva);
1438        }
1439
1440        s = env->mstatus;
1441        s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
1442        s = set_field(s, MSTATUS_SPP, env->priv);
1443        s = set_field(s, MSTATUS_SIE, 0);
1444        env->mstatus = s;
1445        env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
1446        env->sepc = env->pc;
1447        env->stval = tval;
1448        env->htval = htval;
1449        env->pc = (env->stvec >> 2 << 2) +
1450            ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
1451        riscv_cpu_set_mode(env, PRV_S);
1452    } else {
1453        /* handle the trap in M-mode */
1454        if (riscv_has_ext(env, RVH)) {
1455            if (riscv_cpu_virt_enabled(env)) {
1456                riscv_cpu_swap_hypervisor_regs(env);
1457            }
1458            env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
1459                                     riscv_cpu_virt_enabled(env));
1460            if (riscv_cpu_virt_enabled(env) && tval) {
1461                env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1);
1462            }
1463
1464            mtval2 = env->guest_phys_fault_addr;
1465
1466            /* Trapping to M mode, virt is disabled */
1467            riscv_cpu_set_virt_enabled(env, 0);
1468        }
1469
1470        s = env->mstatus;
1471        s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
1472        s = set_field(s, MSTATUS_MPP, env->priv);
1473        s = set_field(s, MSTATUS_MIE, 0);
1474        env->mstatus = s;
1475        env->mcause = cause | ~(((target_ulong)-1) >> async);
1476        env->mepc = env->pc;
1477        env->mtval = tval;
1478        env->mtval2 = mtval2;
1479        env->pc = (env->mtvec >> 2 << 2) +
1480            ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
1481        riscv_cpu_set_mode(env, PRV_M);
1482    }
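
    /*
     * Worked example of the vector computation above (illustrative): with
     * mtvec = 0x80000001 (base 0x80000000, MODE = 1, i.e. vectored) and an
     * asynchronous cause of 7 (machine timer interrupt), the new pc becomes
     * 0x80000000 + 7 * 4 = 0x8000001c, while synchronous traps always enter
     * at the base. The "~(((target_ulong)-1) >> async)" term likewise sets
     * the MSB of mcause only for interrupts.
     */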
1483
1484    /* NOTE: it is not necessary to yield load reservations here. It is only
1485     * necessary for an SC from "another hart" to cause a load reservation
1486     * to be yielded. Refer to the memory consistency model section of the
1487     * RISC-V ISA Specification.
1488     */
1489
1490    env->two_stage_lookup = false;
1491#endif
1492    cs->exception_index = RISCV_EXCP_NONE; /* mark handled to qemu */
1493}
1494