qemu/target/riscv/cpu_helper.c
   1/*
   2 * RISC-V CPU helpers for qemu.
   3 *
   4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
   5 * Copyright (c) 2017-2018 SiFive, Inc.
   6 *
   7 * This program is free software; you can redistribute it and/or modify it
   8 * under the terms and conditions of the GNU General Public License,
   9 * version 2 or later, as published by the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope it will be useful, but WITHOUT
  12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  14 * more details.
  15 *
  16 * You should have received a copy of the GNU General Public License along with
  17 * this program.  If not, see <http://www.gnu.org/licenses/>.
  18 */
  19
  20#include "qemu/osdep.h"
  21#include "qemu/log.h"
  22#include "qemu/main-loop.h"
  23#include "cpu.h"
  24#include "pmu.h"
  25#include "exec/exec-all.h"
  26#include "instmap.h"
  27#include "tcg/tcg-op.h"
  28#include "trace.h"
  29#include "semihosting/common-semi.h"
  30#include "cpu_bits.h"
  31
  32int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
  33{
  34#ifdef CONFIG_USER_ONLY
  35    return 0;
  36#else
  37    return env->priv;
  38#endif
  39}
  40
  41void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
  42                          target_ulong *cs_base, uint32_t *pflags)
  43{
  44    CPUState *cs = env_cpu(env);
  45    RISCVCPU *cpu = RISCV_CPU(cs);
  46
  47    uint32_t flags = 0;
  48
  49    *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
  50    *cs_base = 0;
  51
  52    if (riscv_has_ext(env, RVV) || cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
  53        /*
  54         * If env->vl equals VLMAX, we can use the generic vector operation
  55         * expanders (GVEC) to accelerate the vector operations.
  56         * However, because LMUL can be a fractional number, the maximum
  57         * vector size that can be operated on might be less than 8 bytes,
  58         * which GVEC does not support. So we set the vl_eq_vlmax flag to true
  59         * only when maxsz >= 8 bytes.
  60         */
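            /*
             * Illustrative example: on a core with VLEN = 128, SEW = 8 bits and
             * LMUL = 1/8 give vlmax = 2 and maxsz = 2 bytes, so the GVEC fast
             * path cannot be used even when vl == vlmax.
             */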
  61        uint32_t vlmax = vext_get_vlmax(env_archcpu(env), env->vtype);
  62        uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
  63        uint32_t maxsz = vlmax << sew;
  64        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
  65                           (maxsz >= 8);
  66        flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
  67        flags = FIELD_DP32(flags, TB_FLAGS, SEW, sew);
  68        flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
  69                    FIELD_EX64(env->vtype, VTYPE, VLMUL));
  70        flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
  71        flags = FIELD_DP32(flags, TB_FLAGS, VTA,
  72                    FIELD_EX64(env->vtype, VTYPE, VTA));
  73        flags = FIELD_DP32(flags, TB_FLAGS, VMA,
  74                    FIELD_EX64(env->vtype, VTYPE, VMA));
  75    } else {
  76        flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
  77    }
  78
  79#ifdef CONFIG_USER_ONLY
  80    flags |= TB_FLAGS_MSTATUS_FS;
  81    flags |= TB_FLAGS_MSTATUS_VS;
  82#else
  83    flags |= cpu_mmu_index(env, 0);
  84    if (riscv_cpu_fp_enabled(env)) {
  85        flags |= env->mstatus & MSTATUS_FS;
  86    }
  87
  88    if (riscv_cpu_vector_enabled(env)) {
  89        flags |= env->mstatus & MSTATUS_VS;
  90    }
  91
  92    if (riscv_has_ext(env, RVH)) {
  93        if (env->priv == PRV_M ||
  94            (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
  95            (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
  96                get_field(env->hstatus, HSTATUS_HU))) {
  97            flags = FIELD_DP32(flags, TB_FLAGS, HLSX, 1);
  98        }
  99
 100        flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_FS,
 101                           get_field(env->mstatus_hs, MSTATUS_FS));
 102
 103        flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_VS,
 104                           get_field(env->mstatus_hs, MSTATUS_VS));
 105    }
 106#endif
 107
 108    flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
 109    if (env->cur_pmmask < (env->xl == MXL_RV32 ? UINT32_MAX : UINT64_MAX)) {
 110        flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1);
 111    }
 112    if (env->cur_pmbase != 0) {
 113        flags = FIELD_DP32(flags, TB_FLAGS, PM_BASE_ENABLED, 1);
 114    }
 115
 116    *pflags = flags;
 117}
 118
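    /*
     * Recompute the effective pointer-masking state: cur_pmmask/cur_pmbase are
     * taken from the RVJ per-privilege-mode CSRs when pointer masking is
     * enabled for the current mode, and truncated to 32 bits when XLEN is 32.
     */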
 119void riscv_cpu_update_mask(CPURISCVState *env)
 120{
 121    target_ulong mask = -1, base = 0;
 122    /*
 123     * TODO: Current RVJ spec does not specify
 124     * how the extension interacts with XLEN.
 125     */
 126#ifndef CONFIG_USER_ONLY
 127    if (riscv_has_ext(env, RVJ)) {
 128        switch (env->priv) {
 129        case PRV_M:
 130            if (env->mmte & M_PM_ENABLE) {
 131                mask = env->mpmmask;
 132                base = env->mpmbase;
 133            }
 134            break;
 135        case PRV_S:
 136            if (env->mmte & S_PM_ENABLE) {
 137                mask = env->spmmask;
 138                base = env->spmbase;
 139            }
 140            break;
 141        case PRV_U:
 142            if (env->mmte & U_PM_ENABLE) {
 143                mask = env->upmmask;
 144                base = env->upmbase;
 145            }
 146            break;
 147        default:
 148            g_assert_not_reached();
 149        }
 150    }
 151#endif
 152    if (env->xl == MXL_RV32) {
 153        env->cur_pmmask = mask & UINT32_MAX;
 154        env->cur_pmbase = base & UINT32_MAX;
 155    } else {
 156        env->cur_pmmask = mask;
 157        env->cur_pmbase = base;
 158    }
 159}
 160
 161#ifndef CONFIG_USER_ONLY
 162
 163/*
 164 * The HS-mode is allowed to configure priority only for the
 165 * following VS-mode local interrupts:
 166 *
 167 * 0  (Reserved interrupt, reads as zero)
 168 * 1  Supervisor software interrupt
 169 * 4  (Reserved interrupt, reads as zero)
 170 * 5  Supervisor timer interrupt
 171 * 8  (Reserved interrupt, reads as zero)
 172 * 13 (Reserved interrupt)
 173 * 14 "
 174 * 15 "
 175 * 16 "
 176 * 17 "
 177 * 18 "
 178 * 19 "
 179 * 20 "
 180 * 21 "
 181 * 22 "
 182 * 23 "
 183 */
 184
 185static const int hviprio_index2irq[] = {
 186    0, 1, 4, 5, 8, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 };
 187static const int hviprio_index2rdzero[] = {
 188    1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
 189
 190int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero)
 191{
 192    if (index < 0 || ARRAY_SIZE(hviprio_index2irq) <= index) {
 193        return -EINVAL;
 194    }
 195
 196    if (out_irq) {
 197        *out_irq = hviprio_index2irq[index];
 198    }
 199
 200    if (out_rdzero) {
 201        *out_rdzero = hviprio_index2rdzero[index];
 202    }
 203
 204    return 0;
 205}
 206
 207/*
 208 * Default priorities of local interrupts are defined in the
 209 * RISC-V Advanced Interrupt Architecture specification.
 210 *
 211 * ----------------------------------------------------------------
 212 *  Default  |
 213 *  Priority | Major Interrupt Numbers
 214 * ----------------------------------------------------------------
 215 *  Highest  | 47, 23, 46, 45, 22, 44,
 216 *           | 43, 21, 42, 41, 20, 40
 217 *           |
 218 *           | 11 (0b),  3 (03),  7 (07)
 219 *           |  9 (09),  1 (01),  5 (05)
 220 *           | 12 (0c)
 221 *           | 10 (0a),  2 (02),  6 (06)
 222 *           |
 223 *           | 39, 19, 38, 37, 18, 36,
 224 *  Lowest   | 35, 17, 34, 33, 16, 32
 225 * ----------------------------------------------------------------
 226 */
 227static const uint8_t default_iprio[64] = {
 228 /* Custom interrupts 48 to 63 */
 229 [63] = IPRIO_MMAXIPRIO,
 230 [62] = IPRIO_MMAXIPRIO,
 231 [61] = IPRIO_MMAXIPRIO,
 232 [60] = IPRIO_MMAXIPRIO,
 233 [59] = IPRIO_MMAXIPRIO,
 234 [58] = IPRIO_MMAXIPRIO,
 235 [57] = IPRIO_MMAXIPRIO,
 236 [56] = IPRIO_MMAXIPRIO,
 237 [55] = IPRIO_MMAXIPRIO,
 238 [54] = IPRIO_MMAXIPRIO,
 239 [53] = IPRIO_MMAXIPRIO,
 240 [52] = IPRIO_MMAXIPRIO,
 241 [51] = IPRIO_MMAXIPRIO,
 242 [50] = IPRIO_MMAXIPRIO,
 243 [49] = IPRIO_MMAXIPRIO,
 244 [48] = IPRIO_MMAXIPRIO,
 245
 246 /* Custom interrupts 24 to 31 */
 247 [31] = IPRIO_MMAXIPRIO,
 248 [30] = IPRIO_MMAXIPRIO,
 249 [29] = IPRIO_MMAXIPRIO,
 250 [28] = IPRIO_MMAXIPRIO,
 251 [27] = IPRIO_MMAXIPRIO,
 252 [26] = IPRIO_MMAXIPRIO,
 253 [25] = IPRIO_MMAXIPRIO,
 254 [24] = IPRIO_MMAXIPRIO,
 255
 256 [47] = IPRIO_DEFAULT_UPPER,
 257 [23] = IPRIO_DEFAULT_UPPER + 1,
 258 [46] = IPRIO_DEFAULT_UPPER + 2,
 259 [45] = IPRIO_DEFAULT_UPPER + 3,
 260 [22] = IPRIO_DEFAULT_UPPER + 4,
 261 [44] = IPRIO_DEFAULT_UPPER + 5,
 262
 263 [43] = IPRIO_DEFAULT_UPPER + 6,
 264 [21] = IPRIO_DEFAULT_UPPER + 7,
 265 [42] = IPRIO_DEFAULT_UPPER + 8,
 266 [41] = IPRIO_DEFAULT_UPPER + 9,
 267 [20] = IPRIO_DEFAULT_UPPER + 10,
 268 [40] = IPRIO_DEFAULT_UPPER + 11,
 269
 270 [11] = IPRIO_DEFAULT_M,
 271 [3]  = IPRIO_DEFAULT_M + 1,
 272 [7]  = IPRIO_DEFAULT_M + 2,
 273
 274 [9]  = IPRIO_DEFAULT_S,
 275 [1]  = IPRIO_DEFAULT_S + 1,
 276 [5]  = IPRIO_DEFAULT_S + 2,
 277
 278 [12] = IPRIO_DEFAULT_SGEXT,
 279
 280 [10] = IPRIO_DEFAULT_VS,
 281 [2]  = IPRIO_DEFAULT_VS + 1,
 282 [6]  = IPRIO_DEFAULT_VS + 2,
 283
 284 [39] = IPRIO_DEFAULT_LOWER,
 285 [19] = IPRIO_DEFAULT_LOWER + 1,
 286 [38] = IPRIO_DEFAULT_LOWER + 2,
 287 [37] = IPRIO_DEFAULT_LOWER + 3,
 288 [18] = IPRIO_DEFAULT_LOWER + 4,
 289 [36] = IPRIO_DEFAULT_LOWER + 5,
 290
 291 [35] = IPRIO_DEFAULT_LOWER + 6,
 292 [17] = IPRIO_DEFAULT_LOWER + 7,
 293 [34] = IPRIO_DEFAULT_LOWER + 8,
 294 [33] = IPRIO_DEFAULT_LOWER + 9,
 295 [16] = IPRIO_DEFAULT_LOWER + 10,
 296 [32] = IPRIO_DEFAULT_LOWER + 11,
 297};
 298
 299uint8_t riscv_cpu_default_priority(int irq)
 300{
 301    if (irq < 0 || irq > 63) {
 302        return IPRIO_MMAXIPRIO;
 303    }
 304
 305    return default_iprio[irq] ? default_iprio[irq] : IPRIO_MMAXIPRIO;
 306}
 307
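    /*
     * Pick the interrupt to take from a pending bitmap. Without AIA
     * (Smaia/Ssaia) this is simply the lowest pending interrupt number.
     * With AIA, the pending interrupt with the smallest priority value in
     * iprio[] wins; an entry of 0 means "use the default priority order"
     * relative to the external interrupt (extirq/extirq_def_prio).
     */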
 308static int riscv_cpu_pending_to_irq(CPURISCVState *env,
 309                                    int extirq, unsigned int extirq_def_prio,
 310                                    uint64_t pending, uint8_t *iprio)
 311{
 312    RISCVCPU *cpu = env_archcpu(env);
 313    int irq, best_irq = RISCV_EXCP_NONE;
 314    unsigned int prio, best_prio = UINT_MAX;
 315
 316    if (!pending) {
 317        return RISCV_EXCP_NONE;
 318    }
 319
 320    irq = ctz64(pending);
 321    if (!((extirq == IRQ_M_EXT) ? cpu->cfg.ext_smaia : cpu->cfg.ext_ssaia)) {
 322        return irq;
 323    }
 324
 325    pending = pending >> irq;
 326    while (pending) {
 327        prio = iprio[irq];
 328        if (!prio) {
 329            if (irq == extirq) {
 330                prio = extirq_def_prio;
 331            } else {
 332                prio = (riscv_cpu_default_priority(irq) < extirq_def_prio) ?
 333                       1 : IPRIO_MMAXIPRIO;
 334            }
 335        }
 336        if ((pending & 0x1) && (prio <= best_prio)) {
 337            best_irq = irq;
 338            best_prio = prio;
 339        }
 340        irq++;
 341        pending = pending >> 1;
 342    }
 343
 344    return best_irq;
 345}
 346
 347uint64_t riscv_cpu_all_pending(CPURISCVState *env)
 348{
 349    uint32_t gein = get_field(env->hstatus, HSTATUS_VGEIN);
 350    uint64_t vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
 351    uint64_t vstip = (env->vstime_irq) ? MIP_VSTIP : 0;
 352
 353    return (env->mip | vsgein | vstip) & env->mie;
 354}
 355
 356int riscv_cpu_mirq_pending(CPURISCVState *env)
 357{
 358    uint64_t irqs = riscv_cpu_all_pending(env) & ~env->mideleg &
 359                    ~(MIP_SGEIP | MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
 360
 361    return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
 362                                    irqs, env->miprio);
 363}
 364
 365int riscv_cpu_sirq_pending(CPURISCVState *env)
 366{
 367    uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
 368                    ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
 369
 370    return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
 371                                    irqs, env->siprio);
 372}
 373
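    /*
     * VS-level interrupt bits (VSSIP/VSTIP/VSEIP at positions 2/6/10) are
     * presented to the guest at the corresponding S-level positions (1/5/9),
     * hence the "irqs >> 1" below.
     */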
 374int riscv_cpu_vsirq_pending(CPURISCVState *env)
 375{
 376    uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
 377                    (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
 378
 379    return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
 380                                    irqs >> 1, env->hviprio);
 381}
 382
 383static int riscv_cpu_local_irq_pending(CPURISCVState *env)
 384{
 385    int virq;
 386    uint64_t irqs, pending, mie, hsie, vsie;
 387
 388    /* Determine interrupt enable state of all privilege modes */
 389    if (riscv_cpu_virt_enabled(env)) {
 390        mie = 1;
 391        hsie = 1;
 392        vsie = (env->priv < PRV_S) ||
 393               (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
 394    } else {
 395        mie = (env->priv < PRV_M) ||
 396              (env->priv == PRV_M && get_field(env->mstatus, MSTATUS_MIE));
 397        hsie = (env->priv < PRV_S) ||
 398               (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
 399        vsie = 0;
 400    }
 401
 402    /* Determine all pending interrupts */
 403    pending = riscv_cpu_all_pending(env);
 404
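        /*
         * mie/hsie/vsie are 0 or 1 here; negating them below ("& -mie" etc.)
         * turns them into an all-zeroes or all-ones mask, so interrupts for a
         * level are only considered when that level has interrupts enabled.
         */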
 405    /* Check M-mode interrupts */
 406    irqs = pending & ~env->mideleg & -mie;
 407    if (irqs) {
 408        return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
 409                                        irqs, env->miprio);
 410    }
 411
 412    /* Check HS-mode interrupts */
 413    irqs = pending & env->mideleg & ~env->hideleg & -hsie;
 414    if (irqs) {
 415        return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
 416                                        irqs, env->siprio);
 417    }
 418
 419    /* Check VS-mode interrupts */
 420    irqs = pending & env->mideleg & env->hideleg & -vsie;
 421    if (irqs) {
 422        virq = riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
 423                                        irqs >> 1, env->hviprio);
 424        return (virq <= 0) ? virq : virq + 1;
 425    }
 426
 427    /* Indicate no pending interrupt */
 428    return RISCV_EXCP_NONE;
 429}
 430
 431bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
 432{
 433    if (interrupt_request & CPU_INTERRUPT_HARD) {
 434        RISCVCPU *cpu = RISCV_CPU(cs);
 435        CPURISCVState *env = &cpu->env;
 436        int interruptno = riscv_cpu_local_irq_pending(env);
 437        if (interruptno >= 0) {
 438            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
 439            riscv_cpu_do_interrupt(cs);
 440            return true;
 441        }
 442    }
 443    return false;
 444}
 445
 446/* Return true if floating point support is currently enabled */
 447bool riscv_cpu_fp_enabled(CPURISCVState *env)
 448{
 449    if (env->mstatus & MSTATUS_FS) {
 450        if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_FS)) {
 451            return false;
 452        }
 453        return true;
 454    }
 455
 456    return false;
 457}
 458
 459/* Return true if vector support is currently enabled */
 460bool riscv_cpu_vector_enabled(CPURISCVState *env)
 461{
 462    if (env->mstatus & MSTATUS_VS) {
 463        if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_VS)) {
 464            return false;
 465        }
 466        return true;
 467    }
 468
 469    return false;
 470}
 471
 472void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
 473{
 474    uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM |
 475                            MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
 476                            MSTATUS64_UXL | MSTATUS_VS;
 477
 478    if (riscv_has_ext(env, RVF)) {
 479        mstatus_mask |= MSTATUS_FS;
 480    }
 481    bool current_virt = riscv_cpu_virt_enabled(env);
 482
 483    g_assert(riscv_has_ext(env, RVH));
 484
 485    if (current_virt) {
 486        /* Current V=1 and we are about to change to V=0 */
 487        env->vsstatus = env->mstatus & mstatus_mask;
 488        env->mstatus &= ~mstatus_mask;
 489        env->mstatus |= env->mstatus_hs;
 490
 491        env->vstvec = env->stvec;
 492        env->stvec = env->stvec_hs;
 493
 494        env->vsscratch = env->sscratch;
 495        env->sscratch = env->sscratch_hs;
 496
 497        env->vsepc = env->sepc;
 498        env->sepc = env->sepc_hs;
 499
 500        env->vscause = env->scause;
 501        env->scause = env->scause_hs;
 502
 503        env->vstval = env->stval;
 504        env->stval = env->stval_hs;
 505
 506        env->vsatp = env->satp;
 507        env->satp = env->satp_hs;
 508    } else {
 509        /* Current V=0 and we are about to change to V=1 */
 510        env->mstatus_hs = env->mstatus & mstatus_mask;
 511        env->mstatus &= ~mstatus_mask;
 512        env->mstatus |= env->vsstatus;
 513
 514        env->stvec_hs = env->stvec;
 515        env->stvec = env->vstvec;
 516
 517        env->sscratch_hs = env->sscratch;
 518        env->sscratch = env->vsscratch;
 519
 520        env->sepc_hs = env->sepc;
 521        env->sepc = env->vsepc;
 522
 523        env->scause_hs = env->scause;
 524        env->scause = env->vscause;
 525
 526        env->stval_hs = env->stval;
 527        env->stval = env->vstval;
 528
 529        env->satp_hs = env->satp;
 530        env->satp = env->vsatp;
 531    }
 532}
 533
 534target_ulong riscv_cpu_get_geilen(CPURISCVState *env)
 535{
 536    if (!riscv_has_ext(env, RVH)) {
 537        return 0;
 538    }
 539
 540    return env->geilen;
 541}
 542
 543void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
 544{
 545    if (!riscv_has_ext(env, RVH)) {
 546        return;
 547    }
 548
 549    if (geilen > (TARGET_LONG_BITS - 1)) {
 550        return;
 551    }
 552
 553    env->geilen = geilen;
 554}
 555
 556bool riscv_cpu_virt_enabled(CPURISCVState *env)
 557{
 558    if (!riscv_has_ext(env, RVH)) {
 559        return false;
 560    }
 561
 562    return get_field(env->virt, VIRT_ONOFF);
 563}
 564
 565void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
 566{
 567    if (!riscv_has_ext(env, RVH)) {
 568        return;
 569    }
 570
 571    /* Flush the TLB on all virt mode changes. */
 572    if (get_field(env->virt, VIRT_ONOFF) != enable) {
 573        tlb_flush(env_cpu(env));
 574    }
 575
 576    env->virt = set_field(env->virt, VIRT_ONOFF, enable);
 577
 578    if (enable) {
 579        /*
 580         * The guest external interrupts from an interrupt controller are
 581         * delivered only when the Guest/VM is running (i.e. V=1). This means
 582         * any guest external interrupt which is triggered while the Guest/VM
 583         * is not running (i.e. V=0) will be missed by QEMU, resulting in a
 584         * guest with sluggish response to serial console input and other I/O events.
 585         *
 586         * To solve this, we check and inject interrupt after setting V=1.
 587         */
 588        riscv_cpu_update_mip(env_archcpu(env), 0, 0);
 589    }
 590}
 591
 592bool riscv_cpu_two_stage_lookup(int mmu_idx)
 593{
 594    return mmu_idx & TB_FLAGS_PRIV_HYP_ACCESS_MASK;
 595}
 596
 597int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
 598{
 599    CPURISCVState *env = &cpu->env;
 600    if (env->miclaim & interrupts) {
 601        return -1;
 602    } else {
 603        env->miclaim |= interrupts;
 604        return 0;
 605    }
 606}
 607
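    /*
     * Update the masked bits of mip under the iothread lock, then raise or
     * clear CPU_INTERRUPT_HARD depending on whether anything is pending,
     * including VS-level interrupts (VSEIP derived from hgeip, VSTIP derived
     * from vstime_irq) that are not tracked directly in mip. Returns the
     * previous mip value.
     */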
 608uint64_t riscv_cpu_update_mip(RISCVCPU *cpu, uint64_t mask, uint64_t value)
 609{
 610    CPURISCVState *env = &cpu->env;
 611    CPUState *cs = CPU(cpu);
 612    uint64_t gein, vsgein = 0, vstip = 0, old = env->mip;
 613    bool locked = false;
 614
 615    if (riscv_cpu_virt_enabled(env)) {
 616        gein = get_field(env->hstatus, HSTATUS_VGEIN);
 617        vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
 618    }
 619
 620    /* No need to update mip for VSTIP */
 621    mask = ((mask == MIP_VSTIP) && env->vstime_irq) ? 0 : mask;
 622    vstip = env->vstime_irq ? MIP_VSTIP : 0;
 623
 624    if (!qemu_mutex_iothread_locked()) {
 625        locked = true;
 626        qemu_mutex_lock_iothread();
 627    }
 628
 629    env->mip = (env->mip & ~mask) | (value & mask);
 630
 631    if (env->mip | vsgein | vstip) {
 632        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
 633    } else {
 634        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
 635    }
 636
 637    if (locked) {
 638        qemu_mutex_unlock_iothread();
 639    }
 640
 641    return old;
 642}
 643
 644void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
 645                             void *arg)
 646{
 647    env->rdtime_fn = fn;
 648    env->rdtime_fn_arg = arg;
 649}
 650
 651void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
 652                                   int (*rmw_fn)(void *arg,
 653                                                 target_ulong reg,
 654                                                 target_ulong *val,
 655                                                 target_ulong new_val,
 656                                                 target_ulong write_mask),
 657                                   void *rmw_fn_arg)
 658{
 659    if (priv <= PRV_M) {
 660        env->aia_ireg_rmw_fn[priv] = rmw_fn;
 661        env->aia_ireg_rmw_fn_arg[priv] = rmw_fn_arg;
 662    }
 663}
 664
 665void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
 666{
 667    if (newpriv > PRV_M) {
 668        g_assert_not_reached();
 669    }
 670    if (newpriv == PRV_H) {
 671        newpriv = PRV_U;
 672    }
 673    /* tlb_flush is unnecessary as mode is contained in mmu_idx */
 674    env->priv = newpriv;
 675    env->xl = cpu_recompute_xl(env);
 676    riscv_cpu_update_mask(env);
 677
 678    /*
 679     * Clear the load reservation - otherwise a reservation placed in one
 680     * context/process can be used by another, resulting in an SC succeeding
 681     * incorrectly. Version 2.2 of the ISA specification explicitly requires
 682     * this behaviour, while later revisions say that the kernel "should" use
 683     * an SC instruction to force the yielding of a load reservation on a
 684     * preemptive context switch. As a result, do both.
 685     */
 686    env->load_res = -1;
 687}
 688
 689/*
 690 * get_physical_address_pmp - check PMP permission for this physical address
 691 *
 692 * Match the PMP region and check permission for this physical address and its
 693 * TLB page. Returns 0 if the permission checking was successful
 694 *
 695 * @env: CPURISCVState
 696 * @prot: The returned protection attributes
 697 * @tlb_size: TLB page size containing addr. It could be modified after PMP
 698 *            permission checking. NULL if no TLB page is to be set up for addr.
 699 * @addr: The physical address whose permissions are to be checked
 700 * @access_type: The type of MMU access
 701 * @mode: Indicates current privilege level.
 702 */
 703static int get_physical_address_pmp(CPURISCVState *env, int *prot,
 704                                    target_ulong *tlb_size, hwaddr addr,
 705                                    int size, MMUAccessType access_type,
 706                                    int mode)
 707{
 708    pmp_priv_t pmp_priv;
 709    target_ulong tlb_size_pmp = 0;
 710
 711    if (!riscv_feature(env, RISCV_FEATURE_PMP)) {
 712        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
 713        return TRANSLATE_SUCCESS;
 714    }
 715
 716    if (!pmp_hart_has_privs(env, addr, size, 1 << access_type, &pmp_priv,
 717                            mode)) {
 718        *prot = 0;
 719        return TRANSLATE_PMP_FAIL;
 720    }
 721
 722    *prot = pmp_priv_to_page_prot(pmp_priv);
 723    if (tlb_size != NULL) {
 724        if (pmp_is_range_in_tlb(env, addr & ~(*tlb_size - 1), &tlb_size_pmp)) {
 725            *tlb_size = tlb_size_pmp;
 726        }
 727    }
 728
 729    return TRANSLATE_SUCCESS;
 730}
 731
 732/* get_physical_address - get the physical address for this virtual address
 733 *
 734 * Do a page table walk to obtain the physical address corresponding to a
 735 * virtual address. Returns 0 if the translation was successful
 736 *
 737 * Adapted from Spike's mmu_t::translate and mmu_t::walk
 738 *
 739 * @env: CPURISCVState
 740 * @physical: This will be set to the calculated physical address
 741 * @prot: The returned protection attributes
 742 * @addr: The virtual address to be translated
 743 * @fault_pte_addr: If not NULL, this will be set to the fault PTE address
 744 *                  when an error occurs during PTE address translation.
 745 *                  This will already be shifted to match htval.
 746 * @access_type: The type of MMU access
 747 * @mmu_idx: Indicates current privilege level
 748 * @first_stage: Are we in first stage translation?
 749 *               Second stage is used for hypervisor guest translation
 750 * @two_stage: Are we going to perform two stage translation
 751 * @is_debug: Is this access from a debugger or the monitor?
 752 */
 753static int get_physical_address(CPURISCVState *env, hwaddr *physical,
 754                                int *prot, target_ulong addr,
 755                                target_ulong *fault_pte_addr,
 756                                int access_type, int mmu_idx,
 757                                bool first_stage, bool two_stage,
 758                                bool is_debug)
 759{
 760    /* NOTE: the env->pc value visible here will not be
 761     * correct, but the value visible to the exception handler
 762     * (riscv_cpu_do_interrupt) is correct */
 763    MemTxResult res;
 764    MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
 765    int mode = mmu_idx & TB_FLAGS_PRIV_MMU_MASK;
 766    bool use_background = false;
 767    hwaddr ppn;
 768    RISCVCPU *cpu = env_archcpu(env);
 769    int napot_bits = 0;
 770    target_ulong napot_mask;
 771
 772    /*
 773     * Check if we should use the background registers for the two
 774     * stage translation. We don't need to check if we actually need
 775     * two stage translation as that happened before this function
 776     * was called. Background registers will be used if the guest has
 777     * forced a two stage translation to be on (in HS or M mode).
 778     */
 779    if (!riscv_cpu_virt_enabled(env) && two_stage) {
 780        use_background = true;
 781    }
 782
 783    /* MPRV does not affect the virtual-machine load/store
 784       instructions, HLV, HLVX, and HSV. */
 785    if (riscv_cpu_two_stage_lookup(mmu_idx)) {
 786        mode = get_field(env->hstatus, HSTATUS_SPVP);
 787    } else if (mode == PRV_M && access_type != MMU_INST_FETCH) {
 788        if (get_field(env->mstatus, MSTATUS_MPRV)) {
 789            mode = get_field(env->mstatus, MSTATUS_MPP);
 790        }
 791    }
 792
 793    if (first_stage == false) {
 794        /* We are in stage 2 translation, this is similar to stage 1. */
 795        /* Stage 2 is always taken as U-mode */
 796        mode = PRV_U;
 797    }
 798
 799    if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
 800        *physical = addr;
 801        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
 802        return TRANSLATE_SUCCESS;
 803    }
 804
 805    *prot = 0;
 806
 807    hwaddr base;
 808    int levels, ptidxbits, ptesize, vm, sum, mxr, widened;
 809
 810    if (first_stage == true) {
 811        mxr = get_field(env->mstatus, MSTATUS_MXR);
 812    } else {
 813        mxr = get_field(env->vsstatus, MSTATUS_MXR);
 814    }
 815
 816    if (first_stage == true) {
 817        if (use_background) {
 818            if (riscv_cpu_mxl(env) == MXL_RV32) {
 819                base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT;
 820                vm = get_field(env->vsatp, SATP32_MODE);
 821            } else {
 822                base = (hwaddr)get_field(env->vsatp, SATP64_PPN) << PGSHIFT;
 823                vm = get_field(env->vsatp, SATP64_MODE);
 824            }
 825        } else {
 826            if (riscv_cpu_mxl(env) == MXL_RV32) {
 827                base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT;
 828                vm = get_field(env->satp, SATP32_MODE);
 829            } else {
 830                base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT;
 831                vm = get_field(env->satp, SATP64_MODE);
 832            }
 833        }
 834        widened = 0;
 835    } else {
 836        if (riscv_cpu_mxl(env) == MXL_RV32) {
 837            base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT;
 838            vm = get_field(env->hgatp, SATP32_MODE);
 839        } else {
 840            base = (hwaddr)get_field(env->hgatp, SATP64_PPN) << PGSHIFT;
 841            vm = get_field(env->hgatp, SATP64_MODE);
 842        }
 843        widened = 2;
 844    }
 845    /* status.SUM is ignored (treated as set) when using the background registers or when debugging */
 846    sum = get_field(env->mstatus, MSTATUS_SUM) || use_background || is_debug;
 847    switch (vm) {
 848    case VM_1_10_SV32:
 849      levels = 2; ptidxbits = 10; ptesize = 4; break;
 850    case VM_1_10_SV39:
 851      levels = 3; ptidxbits = 9; ptesize = 8; break;
 852    case VM_1_10_SV48:
 853      levels = 4; ptidxbits = 9; ptesize = 8; break;
 854    case VM_1_10_SV57:
 855      levels = 5; ptidxbits = 9; ptesize = 8; break;
 856    case VM_1_10_MBARE:
 857        *physical = addr;
 858        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
 859        return TRANSLATE_SUCCESS;
 860    default:
 861      g_assert_not_reached();
 862    }
 863
 864    CPUState *cs = env_cpu(env);
 865    int va_bits = PGSHIFT + levels * ptidxbits + widened;
 866    target_ulong mask, masked_msbs;
 867
 868    if (TARGET_LONG_BITS > (va_bits - 1)) {
 869        mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
 870    } else {
 871        mask = 0;
 872    }
 873    masked_msbs = (addr >> (va_bits - 1)) & mask;
 874
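        /*
         * The address is treated as valid only if bits va_bits-1 and above are
         * all zeros or all ones (i.e. properly sign-extended). For example,
         * with Sv39 (va_bits = 39), bits 63..38 must all be equal.
         */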
 875    if (masked_msbs != 0 && masked_msbs != mask) {
 876        return TRANSLATE_FAIL;
 877    }
 878
 879    int ptshift = (levels - 1) * ptidxbits;
 880    int i;
 881
 882#if !TCG_OVERSIZED_GUEST
 883restart:
 884#endif
 885    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
 886        target_ulong idx;
 887        if (i == 0) {
 888            idx = (addr >> (PGSHIFT + ptshift)) &
 889                           ((1 << (ptidxbits + widened)) - 1);
 890        } else {
 891            idx = (addr >> (PGSHIFT + ptshift)) &
 892                           ((1 << ptidxbits) - 1);
 893        }
 894
 895        /* check that physical address of PTE is legal */
 896        hwaddr pte_addr;
 897
 898        if (two_stage && first_stage) {
 899            int vbase_prot;
 900            hwaddr vbase;
 901
 902            /* Do the second stage translation on the base PTE address. */
 903            int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
 904                                                 base, NULL, MMU_DATA_LOAD,
 905                                                 mmu_idx, false, true,
 906                                                 is_debug);
 907
 908            if (vbase_ret != TRANSLATE_SUCCESS) {
 909                if (fault_pte_addr) {
 910                    *fault_pte_addr = (base + idx * ptesize) >> 2;
 911                }
 912                return TRANSLATE_G_STAGE_FAIL;
 913            }
 914
 915            pte_addr = vbase + idx * ptesize;
 916        } else {
 917            pte_addr = base + idx * ptesize;
 918        }
 919
 920        int pmp_prot;
 921        int pmp_ret = get_physical_address_pmp(env, &pmp_prot, NULL, pte_addr,
 922                                               sizeof(target_ulong),
 923                                               MMU_DATA_LOAD, PRV_S);
 924        if (pmp_ret != TRANSLATE_SUCCESS) {
 925            return TRANSLATE_PMP_FAIL;
 926        }
 927
 928        target_ulong pte;
 929        if (riscv_cpu_mxl(env) == MXL_RV32) {
 930            pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
 931        } else {
 932            pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
 933        }
 934
 935        if (res != MEMTX_OK) {
 936            return TRANSLATE_FAIL;
 937        }
 938
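            /*
             * Extract the PPN. With Svpbmt/Svnapot enabled, the high PTE bits
             * (N, PBMT) are legal attribute bits and are masked off; otherwise
             * any bit set above the PPN field makes the PTE invalid.
             */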
 939        if (riscv_cpu_sxl(env) == MXL_RV32) {
 940            ppn = pte >> PTE_PPN_SHIFT;
 941        } else if (cpu->cfg.ext_svpbmt || cpu->cfg.ext_svnapot) {
 942            ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT;
 943        } else {
 944            ppn = pte >> PTE_PPN_SHIFT;
 945            if ((pte & ~(target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT) {
 946                return TRANSLATE_FAIL;
 947            }
 948        }
 949
 950        if (!(pte & PTE_V)) {
 951            /* Invalid PTE */
 952            return TRANSLATE_FAIL;
 953        } else if (!cpu->cfg.ext_svpbmt && (pte & PTE_PBMT)) {
 954            return TRANSLATE_FAIL;
 955        } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
 956            /* Inner PTE, continue walking */
 957            if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) {
 958                return TRANSLATE_FAIL;
 959            }
 960            base = ppn << PGSHIFT;
 961        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
 962            /* Reserved leaf PTE flags: PTE_W */
 963            return TRANSLATE_FAIL;
 964        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
 965            /* Reserved leaf PTE flags: PTE_W + PTE_X */
 966            return TRANSLATE_FAIL;
 967        } else if ((pte & PTE_U) && ((mode != PRV_U) &&
 968                   (!sum || access_type == MMU_INST_FETCH))) {
 969            /* User PTE flags when not U mode and mstatus.SUM is not set,
 970               or the access type is an instruction fetch */
 971            return TRANSLATE_FAIL;
 972        } else if (!(pte & PTE_U) && (mode != PRV_S)) {
 973            /* Supervisor PTE flags when not S mode */
 974            return TRANSLATE_FAIL;
 975        } else if (ppn & ((1ULL << ptshift) - 1)) {
 976            /* Misaligned PPN */
 977            return TRANSLATE_FAIL;
 978        } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
 979                   ((pte & PTE_X) && mxr))) {
 980            /* Read access check failed */
 981            return TRANSLATE_FAIL;
 982        } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
 983            /* Write access check failed */
 984            return TRANSLATE_FAIL;
 985        } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
 986            /* Fetch access check failed */
 987            return TRANSLATE_FAIL;
 988        } else {
 989            /* if necessary, set accessed and dirty bits. */
 990            target_ulong updated_pte = pte | PTE_A |
 991                (access_type == MMU_DATA_STORE ? PTE_D : 0);
 992
 993            /* Page table updates need to be atomic with MTTCG enabled */
 994            if (updated_pte != pte) {
 995                /*
 996                 * - if accessed or dirty bits need updating, and the PTE is
 997                 *   in RAM, then we do so atomically with a compare and swap.
 998                 * - if the PTE is in IO space or ROM, then it can't be updated
 999                 *   and we return TRANSLATE_FAIL.
1000                 * - if the PTE changed by the time we went to update it, then
1001                 *   it is no longer valid and we must re-walk the page table.
1002                 */
1003                MemoryRegion *mr;
1004                hwaddr l = sizeof(target_ulong), addr1;
1005                mr = address_space_translate(cs->as, pte_addr,
1006                    &addr1, &l, false, MEMTXATTRS_UNSPECIFIED);
1007                if (memory_region_is_ram(mr)) {
1008                    target_ulong *pte_pa =
1009                        qemu_map_ram_ptr(mr->ram_block, addr1);
1010#if TCG_OVERSIZED_GUEST
1011                    /* MTTCG is not enabled on oversized TCG guests so
1012                     * page table updates do not need to be atomic */
1013                    *pte_pa = pte = updated_pte;
1014#else
1015                    target_ulong old_pte =
1016                        qatomic_cmpxchg(pte_pa, pte, updated_pte);
1017                    if (old_pte != pte) {
1018                        goto restart;
1019                    } else {
1020                        pte = updated_pte;
1021                    }
1022#endif
1023                } else {
1024                    /* misconfigured PTE in ROM (AD bits are not preset) or
1025                     * PTE is in IO space and can't be updated atomically */
1026                    return TRANSLATE_FAIL;
1027                }
1028            }
1029
1030            /* for superpage mappings, make a fake leaf PTE for the TLB's
1031               benefit. */
1032            target_ulong vpn = addr >> PGSHIFT;
1033
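                /*
                 * Svnapot: a leaf PTE with PTE_N set maps a NAPOT region whose
                 * size is encoded in the low PPN bits. Only 64 KiB regions
                 * (napot_bits == 4, i.e. 16 contiguous 4 KiB pages) are
                 * defined, so anything else is treated as a fault.
                 */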
1034            if (cpu->cfg.ext_svnapot && (pte & PTE_N)) {
1035                napot_bits = ctzl(ppn) + 1;
1036                if ((i != (levels - 1)) || (napot_bits != 4)) {
1037                    return TRANSLATE_FAIL;
1038                }
1039            }
1040
1041            napot_mask = (1 << napot_bits) - 1;
1042            *physical = (((ppn & ~napot_mask) | (vpn & napot_mask) |
1043                          (vpn & (((target_ulong)1 << ptshift) - 1))
1044                         ) << PGSHIFT) | (addr & ~TARGET_PAGE_MASK);
1045
1046            /* set permissions on the TLB entry */
1047            if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
1048                *prot |= PAGE_READ;
1049            }
1050            if ((pte & PTE_X)) {
1051                *prot |= PAGE_EXEC;
1052            }
1053            /* add write permission on stores or if the page is already dirty,
1054               so that we TLB miss on later writes to update the dirty bit */
1055            if ((pte & PTE_W) &&
1056                    (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
1057                *prot |= PAGE_WRITE;
1058            }
1059            return TRANSLATE_SUCCESS;
1060        }
1061    }
1062    return TRANSLATE_FAIL;
1063}
1064
1065static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
1066                                MMUAccessType access_type, bool pmp_violation,
1067                                bool first_stage, bool two_stage,
1068                                bool two_stage_indirect)
1069{
1070    CPUState *cs = env_cpu(env);
1071    int page_fault_exceptions, vm;
1072    uint64_t stap_mode;
1073
1074    if (riscv_cpu_mxl(env) == MXL_RV32) {
1075        stap_mode = SATP32_MODE;
1076    } else {
1077        stap_mode = SATP64_MODE;
1078    }
1079
1080    if (first_stage) {
1081        vm = get_field(env->satp, stap_mode);
1082    } else {
1083        vm = get_field(env->hgatp, stap_mode);
1084    }
1085
1086    page_fault_exceptions = vm != VM_1_10_MBARE && !pmp_violation;
1087
1088    switch (access_type) {
1089    case MMU_INST_FETCH:
1090        if (riscv_cpu_virt_enabled(env) && !first_stage) {
1091            cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
1092        } else {
1093            cs->exception_index = page_fault_exceptions ?
1094                RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
1095        }
1096        break;
1097    case MMU_DATA_LOAD:
1098        if (two_stage && !first_stage) {
1099            cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
1100        } else {
1101            cs->exception_index = page_fault_exceptions ?
1102                RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
1103        }
1104        break;
1105    case MMU_DATA_STORE:
1106        if (two_stage && !first_stage) {
1107            cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
1108        } else {
1109            cs->exception_index = page_fault_exceptions ?
1110                RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
1111        }
1112        break;
1113    default:
1114        g_assert_not_reached();
1115    }
1116    env->badaddr = address;
1117    env->two_stage_lookup = two_stage;
1118    env->two_stage_indirect_lookup = two_stage_indirect;
1119}
1120
1121hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
1122{
1123    RISCVCPU *cpu = RISCV_CPU(cs);
1124    CPURISCVState *env = &cpu->env;
1125    hwaddr phys_addr;
1126    int prot;
1127    int mmu_idx = cpu_mmu_index(&cpu->env, false);
1128
1129    if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
1130                             true, riscv_cpu_virt_enabled(env), true)) {
1131        return -1;
1132    }
1133
1134    if (riscv_cpu_virt_enabled(env)) {
1135        if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
1136                                 0, mmu_idx, false, true, true)) {
1137            return -1;
1138        }
1139    }
1140
1141    return phys_addr & TARGET_PAGE_MASK;
1142}
1143
1144void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
1145                                     vaddr addr, unsigned size,
1146                                     MMUAccessType access_type,
1147                                     int mmu_idx, MemTxAttrs attrs,
1148                                     MemTxResult response, uintptr_t retaddr)
1149{
1150    RISCVCPU *cpu = RISCV_CPU(cs);
1151    CPURISCVState *env = &cpu->env;
1152
1153    if (access_type == MMU_DATA_STORE) {
1154        cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
1155    } else if (access_type == MMU_DATA_LOAD) {
1156        cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
1157    } else {
1158        cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
1159    }
1160
1161    env->badaddr = addr;
1162    env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
1163                            riscv_cpu_two_stage_lookup(mmu_idx);
1164    env->two_stage_indirect_lookup = false;
1165    cpu_loop_exit_restore(cs, retaddr);
1166}
1167
1168void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
1169                                   MMUAccessType access_type, int mmu_idx,
1170                                   uintptr_t retaddr)
1171{
1172    RISCVCPU *cpu = RISCV_CPU(cs);
1173    CPURISCVState *env = &cpu->env;
1174    switch (access_type) {
1175    case MMU_INST_FETCH:
1176        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
1177        break;
1178    case MMU_DATA_LOAD:
1179        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
1180        break;
1181    case MMU_DATA_STORE:
1182        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
1183        break;
1184    default:
1185        g_assert_not_reached();
1186    }
1187    env->badaddr = addr;
1188    env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
1189                            riscv_cpu_two_stage_lookup(mmu_idx);
1190    env->two_stage_indirect_lookup = false;
1191    cpu_loop_exit_restore(cs, retaddr);
1192}
1193
1194
1195static void pmu_tlb_fill_incr_ctr(RISCVCPU *cpu, MMUAccessType access_type)
1196{
1197    enum riscv_pmu_event_idx pmu_event_type;
1198
1199    switch (access_type) {
1200    case MMU_INST_FETCH:
1201        pmu_event_type = RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS;
1202        break;
1203    case MMU_DATA_LOAD:
1204        pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS;
1205        break;
1206    case MMU_DATA_STORE:
1207        pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS;
1208        break;
1209    default:
1210        return;
1211    }
1212
1213    riscv_pmu_incr_ctr(cpu, pmu_event_type);
1214}
1215
1216bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
1217                        MMUAccessType access_type, int mmu_idx,
1218                        bool probe, uintptr_t retaddr)
1219{
1220    RISCVCPU *cpu = RISCV_CPU(cs);
1221    CPURISCVState *env = &cpu->env;
1222    vaddr im_address;
1223    hwaddr pa = 0;
1224    int prot, prot2, prot_pmp;
1225    bool pmp_violation = false;
1226    bool first_stage_error = true;
1227    bool two_stage_lookup = false;
1228    bool two_stage_indirect_error = false;
1229    int ret = TRANSLATE_FAIL;
1230    int mode = mmu_idx;
1231    /* default TLB page size */
1232    target_ulong tlb_size = TARGET_PAGE_SIZE;
1233
1234    env->guest_phys_fault_addr = 0;
1235
1236    qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
1237                  __func__, address, access_type, mmu_idx);
1238
1239    /* MPRV does not affect the virtual-machine load/store
1240       instructions, HLV, HLVX, and HSV. */
1241    if (riscv_cpu_two_stage_lookup(mmu_idx)) {
1242        mode = get_field(env->hstatus, HSTATUS_SPVP);
1243    } else if (mode == PRV_M && access_type != MMU_INST_FETCH &&
1244               get_field(env->mstatus, MSTATUS_MPRV)) {
1245        mode = get_field(env->mstatus, MSTATUS_MPP);
1246        if (riscv_has_ext(env, RVH) && get_field(env->mstatus, MSTATUS_MPV)) {
1247            two_stage_lookup = true;
1248        }
1249    }
1250
1251    if (riscv_cpu_virt_enabled(env) ||
1252        ((riscv_cpu_two_stage_lookup(mmu_idx) || two_stage_lookup) &&
1253         access_type != MMU_INST_FETCH)) {
1254        /* Two stage lookup */
1255        ret = get_physical_address(env, &pa, &prot, address,
1256                                   &env->guest_phys_fault_addr, access_type,
1257                                   mmu_idx, true, true, false);
1258
1259        /*
1260         * A G-stage exception may be triggered during two-stage lookup.
1261         * And the env->guest_phys_fault_addr has already been set in
1262         * get_physical_address().
1263         */
1264        if (ret == TRANSLATE_G_STAGE_FAIL) {
1265            first_stage_error = false;
1266            two_stage_indirect_error = true;
1267            access_type = MMU_DATA_LOAD;
1268        }
1269
1270        qemu_log_mask(CPU_LOG_MMU,
1271                      "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
1272                      TARGET_FMT_plx " prot %d\n",
1273                      __func__, address, ret, pa, prot);
1274
1275        if (ret == TRANSLATE_SUCCESS) {
1276            /* Second stage lookup */
1277            im_address = pa;
1278
1279            ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
1280                                       access_type, mmu_idx, false, true,
1281                                       false);
1282
1283            qemu_log_mask(CPU_LOG_MMU,
1284                    "%s 2nd-stage address=%" VADDR_PRIx " ret %d physical "
1285                    TARGET_FMT_plx " prot %d\n",
1286                    __func__, im_address, ret, pa, prot2);
1287
1288            prot &= prot2;
1289
1290            if (ret == TRANSLATE_SUCCESS) {
1291                ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
1292                                               size, access_type, mode);
1293
1294                qemu_log_mask(CPU_LOG_MMU,
1295                              "%s PMP address=" TARGET_FMT_plx " ret %d prot"
1296                              " %d tlb_size " TARGET_FMT_lu "\n",
1297                              __func__, pa, ret, prot_pmp, tlb_size);
1298
1299                prot &= prot_pmp;
1300            }
1301
1302            if (ret != TRANSLATE_SUCCESS) {
1303                /*
1304                 * Guest physical address translation failed, this is a HS
1305                 * level exception
1306                 */
1307                first_stage_error = false;
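                    /*
                     * htval/mtval2 report the faulting guest physical address
                     * shifted right by 2 bits, hence the ">> 2" below.
                     */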
1308                env->guest_phys_fault_addr = (im_address |
1309                                              (address &
1310                                               (TARGET_PAGE_SIZE - 1))) >> 2;
1311            }
1312        }
1313    } else {
1314        pmu_tlb_fill_incr_ctr(cpu, access_type);
1315        /* Single stage lookup */
1316        ret = get_physical_address(env, &pa, &prot, address, NULL,
1317                                   access_type, mmu_idx, true, false, false);
1318
1319        qemu_log_mask(CPU_LOG_MMU,
1320                      "%s address=%" VADDR_PRIx " ret %d physical "
1321                      TARGET_FMT_plx " prot %d\n",
1322                      __func__, address, ret, pa, prot);
1323
1324        if (ret == TRANSLATE_SUCCESS) {
1325            ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
1326                                           size, access_type, mode);
1327
1328            qemu_log_mask(CPU_LOG_MMU,
1329                          "%s PMP address=" TARGET_FMT_plx " ret %d prot"
1330                          " %d tlb_size " TARGET_FMT_lu "\n",
1331                          __func__, pa, ret, prot_pmp, tlb_size);
1332
1333            prot &= prot_pmp;
1334        }
1335    }
1336
1337    if (ret == TRANSLATE_PMP_FAIL) {
1338        pmp_violation = true;
1339    }
1340
1341    if (ret == TRANSLATE_SUCCESS) {
1342        tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
1343                     prot, mmu_idx, tlb_size);
1344        return true;
1345    } else if (probe) {
1346        return false;
1347    } else {
1348        raise_mmu_exception(env, address, access_type, pmp_violation,
1349                            first_stage_error,
1350                            riscv_cpu_virt_enabled(env) ||
1351                                riscv_cpu_two_stage_lookup(mmu_idx),
1352                            two_stage_indirect_error);
1353        cpu_loop_exit_restore(cs, retaddr);
1354    }
1355
1356    return true;
1357}
1358
1359static target_ulong riscv_transformed_insn(CPURISCVState *env,
1360                                           target_ulong insn,
1361                                           target_ulong taddr)
1362{
1363    target_ulong xinsn = 0;
1364    target_ulong access_rs1 = 0, access_imm = 0, access_size = 0;
1365
1366    /*
1367     * Only Quadrant 0 and Quadrant 2 of the RVC instruction space need to
1368     * be uncompressed. Quadrant 1 of the RVC instruction space need
1369     * not be transformed because those instructions won't generate
1370     * any load/store trap.
1371     */
1372
1373    if ((insn & 0x3) != 0x3) {
1374        /* Transform 16bit instruction into 32bit instruction */
1375        switch (GET_C_OP(insn)) {
1376        case OPC_RISC_C_OP_QUAD0: /* Quadrant 0 */
1377            switch (GET_C_FUNC(insn)) {
1378            case OPC_RISC_C_FUNC_FLD_LQ:
1379                if (riscv_cpu_xlen(env) != 128) { /* C.FLD (RV32/64) */
1380                    xinsn = OPC_RISC_FLD;
1381                    xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
1382                    access_rs1 = GET_C_RS1S(insn);
1383                    access_imm = GET_C_LD_IMM(insn);
1384                    access_size = 8;
1385                }
1386                break;
1387            case OPC_RISC_C_FUNC_LW: /* C.LW */
1388                xinsn = OPC_RISC_LW;
1389                xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
1390                access_rs1 = GET_C_RS1S(insn);
1391                access_imm = GET_C_LW_IMM(insn);
1392                access_size = 4;
1393                break;
1394            case OPC_RISC_C_FUNC_FLW_LD:
1395                if (riscv_cpu_xlen(env) == 32) { /* C.FLW (RV32) */
1396                    xinsn = OPC_RISC_FLW;
1397                    xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
1398                    access_rs1 = GET_C_RS1S(insn);
1399                    access_imm = GET_C_LW_IMM(insn);
1400                    access_size = 4;
1401                } else { /* C.LD (RV64/RV128) */
1402                    xinsn = OPC_RISC_LD;
1403                    xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
1404                    access_rs1 = GET_C_RS1S(insn);
1405                    access_imm = GET_C_LD_IMM(insn);
1406                    access_size = 8;
1407                }
1408                break;
1409            case OPC_RISC_C_FUNC_FSD_SQ:
1410                if (riscv_cpu_xlen(env) != 128) { /* C.FSD (RV32/64) */
1411                    xinsn = OPC_RISC_FSD;
1412                    xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
1413                    access_rs1 = GET_C_RS1S(insn);
1414                    access_imm = GET_C_SD_IMM(insn);
1415                    access_size = 8;
1416                }
1417                break;
1418            case OPC_RISC_C_FUNC_SW: /* C.SW */
1419                xinsn = OPC_RISC_SW;
1420                xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
1421                access_rs1 = GET_C_RS1S(insn);
1422                access_imm = GET_C_SW_IMM(insn);
1423                access_size = 4;
1424                break;
1425            case OPC_RISC_C_FUNC_FSW_SD:
1426                if (riscv_cpu_xlen(env) == 32) { /* C.FSW (RV32) */
1427                    xinsn = OPC_RISC_FSW;
1428                    xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
1429                    access_rs1 = GET_C_RS1S(insn);
1430                    access_imm = GET_C_SW_IMM(insn);
1431                    access_size = 4;
1432                } else { /* C.SD (RV64/RV128) */
1433                    xinsn = OPC_RISC_SD;
1434                    xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
1435                    access_rs1 = GET_C_RS1S(insn);
1436                    access_imm = GET_C_SD_IMM(insn);
1437                    access_size = 8;
1438                }
1439                break;
1440            default:
1441                break;
1442            }
1443            break;
1444        case OPC_RISC_C_OP_QUAD2: /* Quadrant 2 */
1445            switch (GET_C_FUNC(insn)) {
1446            case OPC_RISC_C_FUNC_FLDSP_LQSP:
1447                if (riscv_cpu_xlen(env) != 128) { /* C.FLDSP (RV32/64) */
1448                    xinsn = OPC_RISC_FLD;
1449                    xinsn = SET_RD(xinsn, GET_C_RD(insn));
1450                    access_rs1 = 2;
1451                    access_imm = GET_C_LDSP_IMM(insn);
1452                    access_size = 8;
1453                }
1454                break;
1455            case OPC_RISC_C_FUNC_LWSP: /* C.LWSP */
1456                xinsn = OPC_RISC_LW;
1457                xinsn = SET_RD(xinsn, GET_C_RD(insn));
1458                access_rs1 = 2;
1459                access_imm = GET_C_LWSP_IMM(insn);
1460                access_size = 4;
1461                break;
1462            case OPC_RISC_C_FUNC_FLWSP_LDSP:
1463                if (riscv_cpu_xlen(env) == 32) { /* C.FLWSP (RV32) */
1464                    xinsn = OPC_RISC_FLW;
1465                    xinsn = SET_RD(xinsn, GET_C_RD(insn));
1466                    access_rs1 = 2;
1467                    access_imm = GET_C_LWSP_IMM(insn);
1468                    access_size = 4;
1469                } else { /* C.LDSP (RV64/RV128) */
1470                    xinsn = OPC_RISC_LD;
1471                    xinsn = SET_RD(xinsn, GET_C_RD(insn));
1472                    access_rs1 = 2;
1473                    access_imm = GET_C_LDSP_IMM(insn);
1474                    access_size = 8;
1475                }
1476                break;
1477            case OPC_RISC_C_FUNC_FSDSP_SQSP:
1478                if (riscv_cpu_xlen(env) != 128) { /* C.FSDSP (RV32/64) */
1479                    xinsn = OPC_RISC_FSD;
1480                    xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
1481                    access_rs1 = 2;
1482                    access_imm = GET_C_SDSP_IMM(insn);
1483                    access_size = 8;
1484                }
1485                break;
1486            case OPC_RISC_C_FUNC_SWSP: /* C.SWSP */
1487                xinsn = OPC_RISC_SW;
1488                xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
1489                access_rs1 = 2;
1490                access_imm = GET_C_SWSP_IMM(insn);
1491                access_size = 4;
1492                break;
1493            case 7:
1494                if (riscv_cpu_xlen(env) == 32) { /* C.FSWSP (RV32) */
1495                    xinsn = OPC_RISC_FSW;
1496                    xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
1497                    access_rs1 = 2;
1498                    access_imm = GET_C_SWSP_IMM(insn);
1499                    access_size = 4;
1500                } else { /* C.SDSP (RV64/RV128) */
1501                    xinsn = OPC_RISC_SD;
1502                    xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
1503                    access_rs1 = 2;
1504                    access_imm = GET_C_SDSP_IMM(insn);
1505                    access_size = 8;
1506                }
1507                break;
1508            default:
1509                break;
1510            }
1511            break;
1512        default:
1513            break;
1514        }
1515
1516        /*
1517         * Clear bit 1 of the transformed instruction to indicate that the
1518         * original instruction was a 16-bit instruction.
1519         */
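        /*
         * Per the hypervisor extension's transformed-instruction format, bit 0
         * of a transformed instruction is set, while bit 1 indicates whether
         * the trapping instruction was 32-bit (1) or compressed (0); hence
         * only bit 1 is cleared here.
         */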
1520        xinsn &= ~((target_ulong)0x2);
1521    } else {
1522        /* Transform 32bit (or wider) instructions */
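        /*
         * For these standard encodings the access size is derived from funct3,
         * and the transformed copy carries a zeroed immediate; any misalignment
         * is reported separately through the "Addr. Offset" written into the
         * rs1 field at the end of this function.
         */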
1523        switch (MASK_OP_MAJOR(insn)) {
1524        case OPC_RISC_ATOMIC:
1525            xinsn = insn;
1526            access_rs1 = GET_RS1(insn);
1527            access_size = 1 << GET_FUNCT3(insn);
1528            break;
1529        case OPC_RISC_LOAD:
1530        case OPC_RISC_FP_LOAD:
1531            xinsn = SET_I_IMM(insn, 0);
1532            access_rs1 = GET_RS1(insn);
1533            access_imm = GET_IMM(insn);
1534            access_size = 1 << GET_FUNCT3(insn);
1535            break;
1536        case OPC_RISC_STORE:
1537        case OPC_RISC_FP_STORE:
1538            xinsn = SET_S_IMM(insn, 0);
1539            access_rs1 = GET_RS1(insn);
1540            access_imm = GET_STORE_IMM(insn);
1541            access_size = 1 << GET_FUNCT3(insn);
1542            break;
1543        case OPC_RISC_SYSTEM:
1544            if (MASK_OP_SYSTEM(insn) == OPC_RISC_HLVHSV) {
1545                xinsn = insn;
1546                access_rs1 = GET_RS1(insn);
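                /*
                 * For HLV/HSV, bits [2:1] of funct7 encode log2 of the access
                 * width (00 = byte, 01 = halfword, 10 = word, 11 = doubleword).
                 */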
1547                access_size = ((GET_FUNCT7(insn) >> 1) & 0x3);
1548                access_size = 1 << access_size;
1549            }
1550            break;
1551        default:
1552            break;
1553        }
1554    }
1555
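    /*
     * The rs1 field of the transformed instruction carries the "Addr. Offset":
     * the faulting address minus the address the instruction computed, which is
     * non-zero only for misaligned accesses. Illustrative example: an LW whose
     * computed address is 0x0ffe faults on the second page at taddr 0x1000, so
     * the recorded offset is (0x1000 - 0x0ffe) & 3 = 2.
     */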
1556    if (access_size) {
1557        xinsn = SET_RS1(xinsn, (taddr - (env->gpr[access_rs1] + access_imm)) &
1558                               (access_size - 1));
1559    }
1560
1561    return xinsn;
1562}
1563#endif /* !CONFIG_USER_ONLY */
1564
1565/*
1566 * Handle Traps
1567 *
1568 * Adapted from Spike's processor_t::take_trap.
1569 *
1570 */
1571void riscv_cpu_do_interrupt(CPUState *cs)
1572{
1573#if !defined(CONFIG_USER_ONLY)
1574
1575    RISCVCPU *cpu = RISCV_CPU(cs);
1576    CPURISCVState *env = &cpu->env;
1577    bool write_gva = false;
1578    uint64_t s;
1579
1580    /* cs->exception_index is 32 bits wide, unlike mcause which is XLEN bits
1581     * wide, so we mask off the MSB and separate into trap type and cause.
1582     */
1583    bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
1584    target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
1585    uint64_t deleg = async ? env->mideleg : env->medeleg;
1586    target_ulong tval = 0;
1587    target_ulong tinst = 0;
1588    target_ulong htval = 0;
1589    target_ulong mtval2 = 0;
1590
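    /*
     * Semihosting calls are raised through the reserved slli/ebreak/srai
     * sequence; after servicing the call, execution resumes past the
     * (uncompressed) ebreak.
     */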
1591    if (cause == RISCV_EXCP_SEMIHOST) {
1592        do_common_semihosting(cs);
1593        env->pc += 4;
1594        return;
1595    }
1596
1597    if (!async) {
1598        /* set tval to badaddr for traps with address information */
1599        switch (cause) {
1600        case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
1601        case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
1602        case RISCV_EXCP_LOAD_ADDR_MIS:
1603        case RISCV_EXCP_STORE_AMO_ADDR_MIS:
1604        case RISCV_EXCP_LOAD_ACCESS_FAULT:
1605        case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
1606        case RISCV_EXCP_LOAD_PAGE_FAULT:
1607        case RISCV_EXCP_STORE_PAGE_FAULT:
1608            write_gva = env->two_stage_lookup;
1609            tval = env->badaddr;
1610            if (env->two_stage_indirect_lookup) {
1611                /*
1612                 * special pseudoinstruction for G-stage fault taken while
1613                 * doing VS-stage page table walk.
1614                 */
1615                tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000;
1616            } else {
1617                /*
1618                 * The "Addr. Offset" field in transformed instruction is
1619                 * non-zero only for misaligned access.
1620                 */
1621                tinst = riscv_transformed_insn(env, env->bins, tval);
1622            }
1623            break;
1624        case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
1625        case RISCV_EXCP_INST_ADDR_MIS:
1626        case RISCV_EXCP_INST_ACCESS_FAULT:
1627        case RISCV_EXCP_INST_PAGE_FAULT:
1628            write_gva = env->two_stage_lookup;
1629            tval = env->badaddr;
1630            if (env->two_stage_indirect_lookup) {
1631                /*
1632                 * special pseudoinstruction for G-stage fault taken while
1633                 * doing VS-stage page table walk.
1634                 */
1635                tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000;
1636            }
1637            break;
1638        case RISCV_EXCP_ILLEGAL_INST:
1639        case RISCV_EXCP_VIRT_INSTRUCTION_FAULT:
1640            tval = env->bins;
1641            break;
1642        default:
1643            break;
1644        }
1645        /* ecall is dispatched as one cause so translate based on mode */
1646        if (cause == RISCV_EXCP_U_ECALL) {
1647            assert(env->priv <= 3);
1648
1649            if (env->priv == PRV_M) {
1650                cause = RISCV_EXCP_M_ECALL;
1651            } else if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) {
1652                cause = RISCV_EXCP_VS_ECALL;
1653            } else if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) {
1654                cause = RISCV_EXCP_S_ECALL;
1655            } else if (env->priv == PRV_U) {
1656                cause = RISCV_EXCP_U_ECALL;
1657            }
1658        }
1659    }
1660
1661    trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
1662                     riscv_cpu_get_trap_name(cause, async));
1663
1664    qemu_log_mask(CPU_LOG_INT,
1665                  "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
1666                  "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
1667                  __func__, env->mhartid, async, cause, env->pc, tval,
1668                  riscv_cpu_get_trap_name(cause, async));
1669
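    /*
     * A trap is handled in S-mode only if it was taken from U- or S-mode and
     * the matching bit of medeleg/mideleg (selected into 'deleg' above) is
     * set; otherwise it falls through to the M-mode path below.
     */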
1670    if (env->priv <= PRV_S &&
1671            cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
1672        /* handle the trap in S-mode */
1673        if (riscv_has_ext(env, RVH)) {
1674            uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
1675
1676            if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1)) {
1677                /* Trap to VS mode */
1678                /*
1679                 * See if we need to adjust cause: yes if it is a VS-mode interrupt,
1680                 * no if the hypervisor has delegated one of HS mode's interrupts.
1681                 */
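                /*
                 * The VS-level interrupt codes are one greater than the
                 * corresponding S-level codes, so subtracting one delivers the
                 * interrupt to the guest as an ordinary S-level interrupt.
                 */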
1682                if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
1683                    cause == IRQ_VS_EXT) {
1684                    cause = cause - 1;
1685                }
1686                write_gva = false;
1687            } else if (riscv_cpu_virt_enabled(env)) {
1688                /* Trap into HS mode, from virt */
1689                riscv_cpu_swap_hypervisor_regs(env);
1690                env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
1691                                         env->priv);
1692                env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
1693                                         riscv_cpu_virt_enabled(env));
1694
1695
1696                htval = env->guest_phys_fault_addr;
1697
1698                riscv_cpu_set_virt_enabled(env, 0);
1699            } else {
1700                /* Trap into HS mode */
1701                env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
1702                htval = env->guest_phys_fault_addr;
1703            }
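            /*
             * hstatus.GVA records whether stval was written with a guest
             * virtual address for this trap.
             */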
1704            env->hstatus = set_field(env->hstatus, HSTATUS_GVA, write_gva);
1705        }
1706
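        /*
         * Standard S-mode trap entry: the old SIE is saved into SPIE, the
         * previous privilege into SPP, and S-mode interrupts are disabled.
         */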
1707        s = env->mstatus;
1708        s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
1709        s = set_field(s, MSTATUS_SPP, env->priv);
1710        s = set_field(s, MSTATUS_SIE, 0);
1711        env->mstatus = s;
1712        env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
1713        env->sepc = env->pc;
1714        env->stval = tval;
1715        env->htval = htval;
1716        env->htinst = tinst;
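        /*
         * The low two bits of stvec select the trap vector mode: 0 = direct,
         * 1 = vectored. Vectored mode sends asynchronous traps to
         * BASE + 4 * cause; synchronous traps always enter at BASE.
         */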
1717        env->pc = (env->stvec >> 2 << 2) +
1718            ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
1719        riscv_cpu_set_mode(env, PRV_S);
1720    } else {
1721        /* handle the trap in M-mode */
1722        if (riscv_has_ext(env, RVH)) {
1723            if (riscv_cpu_virt_enabled(env)) {
1724                riscv_cpu_swap_hypervisor_regs(env);
1725            }
1726            env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
1727                                     riscv_cpu_virt_enabled(env));
1728            if (riscv_cpu_virt_enabled(env) && tval) {
1729                env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1);
1730            }
1731
1732            mtval2 = env->guest_phys_fault_addr;
1733
1734            /* Trapping to M mode, virt is disabled */
1735            riscv_cpu_set_virt_enabled(env, 0);
1736        }
1737
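        /*
         * Mirror of the S-mode entry above: MIE is saved into MPIE, the
         * previous privilege into MPP, and M-mode interrupts are disabled.
         */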
1738        s = env->mstatus;
1739        s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
1740        s = set_field(s, MSTATUS_MPP, env->priv);
1741        s = set_field(s, MSTATUS_MIE, 0);
1742        env->mstatus = s;
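        /*
         * ~(((target_ulong)-1) >> async) is 0 for a synchronous trap and has
         * only the MSB set for an asynchronous one, so this sets mcause's
         * interrupt bit without a branch.
         */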
1743        env->mcause = cause | ~(((target_ulong)-1) >> async);
1744        env->mepc = env->pc;
1745        env->mtval = tval;
1746        env->mtval2 = mtval2;
1747        env->mtinst = tinst;
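        /* Same direct/vectored handling as for stvec above, applied to mtvec. */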
1748        env->pc = (env->mtvec >> 2 << 2) +
1749            ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
1750        riscv_cpu_set_mode(env, PRV_M);
1751    }
1752
1753    /* NOTE: it is not necessary to yield load reservations here. It is only
1754     * necessary for an SC from "another hart" to cause a load reservation
1755     * to be yielded. Refer to the memory consistency model section of the
1756     * RISC-V ISA Specification.
1757     */
1758
1759    env->two_stage_lookup = false;
1760    env->two_stage_indirect_lookup = false;
1761#endif
1762    cs->exception_index = RISCV_EXCP_NONE; /* mark handled to qemu */
1763}
1764