/*
 * RISC-V CPU helpers for qemu.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

20#include "qemu/osdep.h"
21#include "qemu/log.h"
22#include "qemu/main-loop.h"
23#include "cpu.h"
24#include "exec/exec-all.h"
25#include "tcg/tcg-op.h"
26#include "trace.h"
27#include "semihosting/common-semi.h"
28
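/*
 * Softmmu index used for memory accesses: user-only builds always use
 * index 0, while system emulation uses the current privilege level.
 */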
29int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
30{
31#ifdef CONFIG_USER_ONLY
32 return 0;
33#else
34 return env->priv;
35#endif
36}
37
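/*
 * Pack the CPU state that influences code generation (current PC, XLEN,
 * vector configuration, FP/vector status, MMU index, pointer-masking
 * state) into the flags used to look up translation blocks.
 */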
38void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
39 target_ulong *cs_base, uint32_t *pflags)
40{
41 CPUState *cs = env_cpu(env);
42 RISCVCPU *cpu = RISCV_CPU(cs);
43
44 uint32_t flags = 0;
45
46 *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
47 *cs_base = 0;
48
49 if (riscv_has_ext(env, RVV) || cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
        /*
         * If env->vl equals VLMAX, we can use generic vector operation
         * expanders (GVEC) to accelerate the vector operations.
         * However, as LMUL could be a fractional number, the maximum
         * vector size that can be operated on might be less than 8 bytes,
         * which is not supported by GVEC. So we only set the vl_eq_vlmax
         * flag to true when maxsz >= 8 bytes.
         */
58 uint32_t vlmax = vext_get_vlmax(env_archcpu(env), env->vtype);
59 uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
60 uint32_t maxsz = vlmax << sew;
61 bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
62 (maxsz >= 8);
63 flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
64 flags = FIELD_DP32(flags, TB_FLAGS, SEW, sew);
65 flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
66 FIELD_EX64(env->vtype, VTYPE, VLMUL));
67 flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
68 flags = FIELD_DP32(flags, TB_FLAGS, VTA,
69 FIELD_EX64(env->vtype, VTYPE, VTA));
70 } else {
71 flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
72 }
73
74#ifdef CONFIG_USER_ONLY
75 flags |= TB_FLAGS_MSTATUS_FS;
76 flags |= TB_FLAGS_MSTATUS_VS;
77#else
78 flags |= cpu_mmu_index(env, 0);
79 if (riscv_cpu_fp_enabled(env)) {
80 flags |= env->mstatus & MSTATUS_FS;
81 }
82
83 if (riscv_cpu_vector_enabled(env)) {
84 flags |= env->mstatus & MSTATUS_VS;
85 }
86
87 if (riscv_has_ext(env, RVH)) {
88 if (env->priv == PRV_M ||
89 (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
90 (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
91 get_field(env->hstatus, HSTATUS_HU))) {
92 flags = FIELD_DP32(flags, TB_FLAGS, HLSX, 1);
93 }
94
95 flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_FS,
96 get_field(env->mstatus_hs, MSTATUS_FS));
97
98 flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_VS,
99 get_field(env->mstatus_hs, MSTATUS_VS));
100 }
101#endif
102
103 flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
104 if (env->cur_pmmask < (env->xl == MXL_RV32 ? UINT32_MAX : UINT64_MAX)) {
105 flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1);
106 }
107 if (env->cur_pmbase != 0) {
108 flags = FIELD_DP32(flags, TB_FLAGS, PM_BASE_ENABLED, 1);
109 }
110
111 *pflags = flags;
112}
113
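/*
 * Recompute the cached pointer-masking mask/base (RVJ extension) for the
 * current privilege level and XLEN.
 */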
114void riscv_cpu_update_mask(CPURISCVState *env)
115{
116 target_ulong mask = -1, base = 0;
    /*
     * TODO: Current RVJ spec does not specify
     * how the extension interacts with XLEN.
     */
121#ifndef CONFIG_USER_ONLY
122 if (riscv_has_ext(env, RVJ)) {
123 switch (env->priv) {
124 case PRV_M:
125 if (env->mmte & M_PM_ENABLE) {
126 mask = env->mpmmask;
127 base = env->mpmbase;
128 }
129 break;
130 case PRV_S:
131 if (env->mmte & S_PM_ENABLE) {
132 mask = env->spmmask;
133 base = env->spmbase;
134 }
135 break;
136 case PRV_U:
137 if (env->mmte & U_PM_ENABLE) {
138 mask = env->upmmask;
139 base = env->upmbase;
140 }
141 break;
142 default:
143 g_assert_not_reached();
144 }
145 }
146#endif
147 if (env->xl == MXL_RV32) {
148 env->cur_pmmask = mask & UINT32_MAX;
149 env->cur_pmbase = base & UINT32_MAX;
150 } else {
151 env->cur_pmmask = mask;
152 env->cur_pmbase = base;
153 }
154}
155
156#ifndef CONFIG_USER_ONLY
157
/*
 * The HS-mode is allowed to configure priority only for the
 * following VS-mode local interrupts:
 *
 * 0  (Reserved interrupt, reads as zero)
 * 1  Supervisor software interrupt
 * 4  (Reserved interrupt, reads as zero)
 * 5  Supervisor timer interrupt
 * 8  (Reserved interrupt, reads as zero)
 * 13 (Reserved interrupt, reads as zero)
 * 14 "
 * 15 "
 * 16 "
 * 17 "
 * 18 "
 * 19 "
 * 20 "
 * 21 "
 * 22 "
 * 23 "
 */
180static const int hviprio_index2irq[] = {
181 0, 1, 4, 5, 8, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 };
182static const int hviprio_index2rdzero[] = {
183 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
184
185int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero)
186{
187 if (index < 0 || ARRAY_SIZE(hviprio_index2irq) <= index) {
188 return -EINVAL;
189 }
190
191 if (out_irq) {
192 *out_irq = hviprio_index2irq[index];
193 }
194
195 if (out_rdzero) {
196 *out_rdzero = hviprio_index2rdzero[index];
197 }
198
199 return 0;
200}
201
/*
 * Default priorities of local interrupts as defined by the RISC-V
 * Advanced Interrupt Architecture (AIA) specification:
 *
 * ----------------------------------------------------------------
 *  Default  |
 *  Priority | Major Interrupt Numbers
 * ----------------------------------------------------------------
 *  Highest  | 47, 23, 46, 45, 22, 44,
 *           | 43, 21, 42, 41, 20, 40
 *           |
 *           | 11 (0b),  3 (03),  7 (07)
 *           |  9 (09),  1 (01),  5 (05)
 *           | 12 (0c)
 *           | 10 (0a),  2 (02),  6 (06)
 *           |
 *           | 39, 19, 38, 37, 18, 36,
 *  Lowest   | 35, 17, 34, 33, 16, 32
 * ----------------------------------------------------------------
 */
222static const uint8_t default_iprio[64] = {
    /* Custom interrupts 48 to 63 */
224 [63] = IPRIO_MMAXIPRIO,
225 [62] = IPRIO_MMAXIPRIO,
226 [61] = IPRIO_MMAXIPRIO,
227 [60] = IPRIO_MMAXIPRIO,
228 [59] = IPRIO_MMAXIPRIO,
229 [58] = IPRIO_MMAXIPRIO,
230 [57] = IPRIO_MMAXIPRIO,
231 [56] = IPRIO_MMAXIPRIO,
232 [55] = IPRIO_MMAXIPRIO,
233 [54] = IPRIO_MMAXIPRIO,
234 [53] = IPRIO_MMAXIPRIO,
235 [52] = IPRIO_MMAXIPRIO,
236 [51] = IPRIO_MMAXIPRIO,
237 [50] = IPRIO_MMAXIPRIO,
238 [49] = IPRIO_MMAXIPRIO,
239 [48] = IPRIO_MMAXIPRIO,
240
    /* Custom interrupts 24 to 31 */
242 [31] = IPRIO_MMAXIPRIO,
243 [30] = IPRIO_MMAXIPRIO,
244 [29] = IPRIO_MMAXIPRIO,
245 [28] = IPRIO_MMAXIPRIO,
246 [27] = IPRIO_MMAXIPRIO,
247 [26] = IPRIO_MMAXIPRIO,
248 [25] = IPRIO_MMAXIPRIO,
249 [24] = IPRIO_MMAXIPRIO,
250
251 [47] = IPRIO_DEFAULT_UPPER,
252 [23] = IPRIO_DEFAULT_UPPER + 1,
253 [46] = IPRIO_DEFAULT_UPPER + 2,
254 [45] = IPRIO_DEFAULT_UPPER + 3,
255 [22] = IPRIO_DEFAULT_UPPER + 4,
256 [44] = IPRIO_DEFAULT_UPPER + 5,
257
258 [43] = IPRIO_DEFAULT_UPPER + 6,
259 [21] = IPRIO_DEFAULT_UPPER + 7,
260 [42] = IPRIO_DEFAULT_UPPER + 8,
261 [41] = IPRIO_DEFAULT_UPPER + 9,
262 [20] = IPRIO_DEFAULT_UPPER + 10,
263 [40] = IPRIO_DEFAULT_UPPER + 11,
264
265 [11] = IPRIO_DEFAULT_M,
266 [3] = IPRIO_DEFAULT_M + 1,
267 [7] = IPRIO_DEFAULT_M + 2,
268
269 [9] = IPRIO_DEFAULT_S,
270 [1] = IPRIO_DEFAULT_S + 1,
271 [5] = IPRIO_DEFAULT_S + 2,
272
273 [12] = IPRIO_DEFAULT_SGEXT,
274
275 [10] = IPRIO_DEFAULT_VS,
276 [2] = IPRIO_DEFAULT_VS + 1,
277 [6] = IPRIO_DEFAULT_VS + 2,
278
279 [39] = IPRIO_DEFAULT_LOWER,
280 [19] = IPRIO_DEFAULT_LOWER + 1,
281 [38] = IPRIO_DEFAULT_LOWER + 2,
282 [37] = IPRIO_DEFAULT_LOWER + 3,
283 [18] = IPRIO_DEFAULT_LOWER + 4,
284 [36] = IPRIO_DEFAULT_LOWER + 5,
285
286 [35] = IPRIO_DEFAULT_LOWER + 6,
287 [17] = IPRIO_DEFAULT_LOWER + 7,
288 [34] = IPRIO_DEFAULT_LOWER + 8,
289 [33] = IPRIO_DEFAULT_LOWER + 9,
290 [16] = IPRIO_DEFAULT_LOWER + 10,
291 [32] = IPRIO_DEFAULT_LOWER + 11,
292};
293
294uint8_t riscv_cpu_default_priority(int irq)
295{
296 if (irq < 0 || irq > 63) {
297 return IPRIO_MMAXIPRIO;
298 }
299
300 return default_iprio[irq] ? default_iprio[irq] : IPRIO_MMAXIPRIO;
}
302
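/*
 * Select the highest-priority pending local interrupt. Without AIA the
 * lowest pending bit wins; with AIA the per-IRQ priority array and the
 * default priorities above are used to break ties.
 */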
303static int riscv_cpu_pending_to_irq(CPURISCVState *env,
304 int extirq, unsigned int extirq_def_prio,
305 uint64_t pending, uint8_t *iprio)
306{
307 int irq, best_irq = RISCV_EXCP_NONE;
308 unsigned int prio, best_prio = UINT_MAX;
309
310 if (!pending) {
311 return RISCV_EXCP_NONE;
312 }
313
314 irq = ctz64(pending);
315 if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
316 return irq;
317 }
318
319 pending = pending >> irq;
320 while (pending) {
321 prio = iprio[irq];
322 if (!prio) {
323 if (irq == extirq) {
324 prio = extirq_def_prio;
325 } else {
326 prio = (riscv_cpu_default_priority(irq) < extirq_def_prio) ?
327 1 : IPRIO_MMAXIPRIO;
328 }
329 }
330 if ((pending & 0x1) && (prio <= best_prio)) {
331 best_irq = irq;
332 best_prio = prio;
333 }
334 irq++;
335 pending = pending >> 1;
336 }
337
338 return best_irq;
339}
340
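/*
 * All pending and enabled interrupts, including a virtual supervisor
 * external interrupt injected via hgeip for the selected guest file.
 */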
341uint64_t riscv_cpu_all_pending(CPURISCVState *env)
342{
343 uint32_t gein = get_field(env->hstatus, HSTATUS_VGEIN);
344 uint64_t vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
345
346 return (env->mip | vsgein) & env->mie;
347}
348
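/*
 * Highest-priority pending M-mode interrupt, i.e. one that is neither
 * delegated to S-mode nor a guest (VS-level) interrupt.
 */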
349int riscv_cpu_mirq_pending(CPURISCVState *env)
350{
351 uint64_t irqs = riscv_cpu_all_pending(env) & ~env->mideleg &
352 ~(MIP_SGEIP | MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
353
354 return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
355 irqs, env->miprio);
356}
357
358int riscv_cpu_sirq_pending(CPURISCVState *env)
359{
360 uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
361 ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
362
363 return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
364 irqs, env->siprio);
365}
366
367int riscv_cpu_vsirq_pending(CPURISCVState *env)
368{
369 uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
370 (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
371
372 return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
373 irqs >> 1, env->hviprio);
374}
375
376static int riscv_cpu_local_irq_pending(CPURISCVState *env)
377{
378 int virq;
379 uint64_t irqs, pending, mie, hsie, vsie;
380
    /* Determine interrupt enable state of all privilege modes */
382 if (riscv_cpu_virt_enabled(env)) {
383 mie = 1;
384 hsie = 1;
385 vsie = (env->priv < PRV_S) ||
386 (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
387 } else {
388 mie = (env->priv < PRV_M) ||
389 (env->priv == PRV_M && get_field(env->mstatus, MSTATUS_MIE));
390 hsie = (env->priv < PRV_S) ||
391 (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
392 vsie = 0;
393 }
394
    /* Determine all pending interrupts */
396 pending = riscv_cpu_all_pending(env);
397
    /* Check M-mode interrupts */
399 irqs = pending & ~env->mideleg & -mie;
400 if (irqs) {
401 return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
402 irqs, env->miprio);
403 }
404
    /* Check HS-mode interrupts */
406 irqs = pending & env->mideleg & ~env->hideleg & -hsie;
407 if (irqs) {
408 return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
409 irqs, env->siprio);
410 }
411
    /* Check VS-mode interrupts */
413 irqs = pending & env->mideleg & env->hideleg & -vsie;
414 if (irqs) {
415 virq = riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
416 irqs >> 1, env->hviprio);
417 return (virq <= 0) ? virq : virq + 1;
418 }
419
    /* Indicate no pending interrupt */
421 return RISCV_EXCP_NONE;
422}
423
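/*
 * TCG hook: if a local interrupt is pending and enabled, convert it into
 * an exception and take it immediately.
 */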
424bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
425{
426 if (interrupt_request & CPU_INTERRUPT_HARD) {
427 RISCVCPU *cpu = RISCV_CPU(cs);
428 CPURISCVState *env = &cpu->env;
429 int interruptno = riscv_cpu_local_irq_pending(env);
430 if (interruptno >= 0) {
431 cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
432 riscv_cpu_do_interrupt(cs);
433 return true;
434 }
435 }
436 return false;
437}
438
/* Return true if floating point support is currently enabled */
440bool riscv_cpu_fp_enabled(CPURISCVState *env)
441{
442 if (env->mstatus & MSTATUS_FS) {
443 if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_FS)) {
444 return false;
445 }
446 return true;
447 }
448
449 return false;
450}
451
/* Return true if vector support is currently enabled */
453bool riscv_cpu_vector_enabled(CPURISCVState *env)
454{
455 if (env->mstatus & MSTATUS_VS) {
456 if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_VS)) {
457 return false;
458 }
459 return true;
460 }
461
462 return false;
463}
464
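/*
 * Swap the S-mode CSR state with the VS-mode backups when entering or
 * leaving virtualization mode (V=1 <-> V=0).
 */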
465void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
466{
467 uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM |
468 MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
469 MSTATUS64_UXL | MSTATUS_VS;
470
471 if (riscv_has_ext(env, RVF)) {
472 mstatus_mask |= MSTATUS_FS;
473 }
474 bool current_virt = riscv_cpu_virt_enabled(env);
475
476 g_assert(riscv_has_ext(env, RVH));
477
478 if (current_virt) {
        /* Current V=1 and we are about to change to V=0 */
480 env->vsstatus = env->mstatus & mstatus_mask;
481 env->mstatus &= ~mstatus_mask;
482 env->mstatus |= env->mstatus_hs;
483
484 env->vstvec = env->stvec;
485 env->stvec = env->stvec_hs;
486
487 env->vsscratch = env->sscratch;
488 env->sscratch = env->sscratch_hs;
489
490 env->vsepc = env->sepc;
491 env->sepc = env->sepc_hs;
492
493 env->vscause = env->scause;
494 env->scause = env->scause_hs;
495
496 env->vstval = env->stval;
497 env->stval = env->stval_hs;
498
499 env->vsatp = env->satp;
500 env->satp = env->satp_hs;
501 } else {
        /* Current V=0 and we are about to change to V=1 */
503 env->mstatus_hs = env->mstatus & mstatus_mask;
504 env->mstatus &= ~mstatus_mask;
505 env->mstatus |= env->vsstatus;
506
507 env->stvec_hs = env->stvec;
508 env->stvec = env->vstvec;
509
510 env->sscratch_hs = env->sscratch;
511 env->sscratch = env->vsscratch;
512
513 env->sepc_hs = env->sepc;
514 env->sepc = env->vsepc;
515
516 env->scause_hs = env->scause;
517 env->scause = env->vscause;
518
519 env->stval_hs = env->stval;
520 env->stval = env->vstval;
521
522 env->satp_hs = env->satp;
523 env->satp = env->vsatp;
524 }
525}
526
527target_ulong riscv_cpu_get_geilen(CPURISCVState *env)
528{
529 if (!riscv_has_ext(env, RVH)) {
530 return 0;
531 }
532
533 return env->geilen;
534}
535
536void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
537{
538 if (!riscv_has_ext(env, RVH)) {
539 return;
540 }
541
542 if (geilen > (TARGET_LONG_BITS - 1)) {
543 return;
544 }
545
546 env->geilen = geilen;
547}
548
549bool riscv_cpu_virt_enabled(CPURISCVState *env)
550{
551 if (!riscv_has_ext(env, RVH)) {
552 return false;
553 }
554
555 return get_field(env->virt, VIRT_ONOFF);
556}
557
558void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
559{
560 if (!riscv_has_ext(env, RVH)) {
561 return;
562 }
563
    /* Flush the TLB on all virt mode changes. */
565 if (get_field(env->virt, VIRT_ONOFF) != enable) {
566 tlb_flush(env_cpu(env));
567 }
568
569 env->virt = set_field(env->virt, VIRT_ONOFF, enable);
570
571 if (enable) {
        /*
         * The guest external interrupts from an interrupt controller are
         * delivered only when the Guest/VM is running (i.e. V=1). This means
         * any guest external interrupt which is triggered while the Guest/VM
         * is not running (i.e. V=0) will be missed by QEMU, resulting in the
         * guest responding sluggishly to serial console input and other I/O.
         *
         * To solve this, we check and inject interrupts after setting V=1.
         */
581 riscv_cpu_update_mip(env_archcpu(env), 0, 0);
582 }
583}
584
585bool riscv_cpu_two_stage_lookup(int mmu_idx)
586{
587 return mmu_idx & TB_FLAGS_PRIV_HYP_ACCESS_MASK;
588}
589
590int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
591{
592 CPURISCVState *env = &cpu->env;
593 if (env->miclaim & interrupts) {
594 return -1;
595 } else {
596 env->miclaim |= interrupts;
597 return 0;
598 }
599}
600
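/*
 * Update the given bits of mip and raise or clear the hard interrupt
 * line accordingly; returns the previous mip value.
 */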
601uint64_t riscv_cpu_update_mip(RISCVCPU *cpu, uint64_t mask, uint64_t value)
602{
603 CPURISCVState *env = &cpu->env;
604 CPUState *cs = CPU(cpu);
605 uint64_t gein, vsgein = 0, old = env->mip;
606 bool locked = false;
607
608 if (riscv_cpu_virt_enabled(env)) {
609 gein = get_field(env->hstatus, HSTATUS_VGEIN);
610 vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
611 }
612
613 if (!qemu_mutex_iothread_locked()) {
614 locked = true;
615 qemu_mutex_lock_iothread();
616 }
617
618 env->mip = (env->mip & ~mask) | (value & mask);
619
620 if (env->mip | vsgein) {
621 cpu_interrupt(cs, CPU_INTERRUPT_HARD);
622 } else {
623 cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
624 }
625
626 if (locked) {
627 qemu_mutex_unlock_iothread();
628 }
629
630 return old;
631}
632
633void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
634 void *arg)
635{
636 env->rdtime_fn = fn;
637 env->rdtime_fn_arg = arg;
638}
639
640void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
641 int (*rmw_fn)(void *arg,
642 target_ulong reg,
643 target_ulong *val,
644 target_ulong new_val,
645 target_ulong write_mask),
646 void *rmw_fn_arg)
647{
648 if (priv <= PRV_M) {
649 env->aia_ireg_rmw_fn[priv] = rmw_fn;
650 env->aia_ireg_rmw_fn_arg[priv] = rmw_fn_arg;
651 }
652}
653
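/* Change the effective privilege level and recompute dependent state. */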
654void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
655{
656 if (newpriv > PRV_M) {
657 g_assert_not_reached();
658 }
659 if (newpriv == PRV_H) {
660 newpriv = PRV_U;
661 }
662
663 env->priv = newpriv;
664 env->xl = cpu_recompute_xl(env);
665 riscv_cpu_update_mask(env);
666
    /*
     * Clear the load reservation - otherwise a reservation placed in one
     * context/process can be used by another, resulting in an SC succeeding
     * incorrectly. Version 2.2 of the ISA specification explicitly requires
     * this behaviour, while later revisions say that the kernel "should" use
     * an SC instruction to force the yielding of a load reservation on a
     * preemptive context switch. As a result, do both.
     */
675 env->load_res = -1;
676}
677
/*
 * get_physical_address_pmp - check PMP permission for this physical address
 *
 * Match the PMP region and check permission for this physical address and
 * its TLB page. Returns 0 if the permission checking was successful.
 *
 * @env: CPURISCVState
 * @prot: The returned protection attributes
 * @tlb_size: TLB page size containing addr. It could be modified after PMP
 *            permission checking. NULL if not set TLB page for addr.
 * @addr: The physical address to be checked permission
 * @size: The size of the access
 * @access_type: The type of MMU access
 * @mode: Indicates current privilege level.
 */
692static int get_physical_address_pmp(CPURISCVState *env, int *prot,
693 target_ulong *tlb_size, hwaddr addr,
694 int size, MMUAccessType access_type,
695 int mode)
696{
697 pmp_priv_t pmp_priv;
698 target_ulong tlb_size_pmp = 0;
699
700 if (!riscv_feature(env, RISCV_FEATURE_PMP)) {
701 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
702 return TRANSLATE_SUCCESS;
703 }
704
705 if (!pmp_hart_has_privs(env, addr, size, 1 << access_type, &pmp_priv,
706 mode)) {
707 *prot = 0;
708 return TRANSLATE_PMP_FAIL;
709 }
710
711 *prot = pmp_priv_to_page_prot(pmp_priv);
712 if (tlb_size != NULL) {
713 if (pmp_is_range_in_tlb(env, addr & ~(*tlb_size - 1), &tlb_size_pmp)) {
714 *tlb_size = tlb_size_pmp;
715 }
716 }
717
718 return TRANSLATE_SUCCESS;
719}
720
/*
 * get_physical_address - get the physical address for this virtual address
 *
 * Do a page table walk to obtain the physical address corresponding to a
 * virtual address. Returns 0 if the translation was successful.
 *
 * Adapted from Spike's mmu_t::translate and mmu_t::walk
 *
 * @env: CPURISCVState
 * @physical: This will be set to the calculated physical address
 * @prot: The returned protection attributes
 * @addr: The virtual address to be translated
 * @fault_pte_addr: If not NULL, this will be set to the fault pte address
 *                  when an error occurs on pte address translation.
 *                  This will already be shifted to match htval.
 * @access_type: The type of MMU access
 * @mmu_idx: Indicates current privilege level
 * @first_stage: Are we in first stage translation?
 *               Second stage is used for hypervisor guest translation
 * @two_stage: Are we going to perform two stage translation
 * @is_debug: Is this access from a debugger or the monitor?
 */
742static int get_physical_address(CPURISCVState *env, hwaddr *physical,
743 int *prot, target_ulong addr,
744 target_ulong *fault_pte_addr,
745 int access_type, int mmu_idx,
746 bool first_stage, bool two_stage,
747 bool is_debug)
748{
    /* NOTE: the env->pc value visible here will not be correct, but the
     * value visible to the exception handler (riscv_cpu_do_interrupt)
     * is correct */
752 MemTxResult res;
753 MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
754 int mode = mmu_idx & TB_FLAGS_PRIV_MMU_MASK;
755 bool use_background = false;
756 hwaddr ppn;
757 RISCVCPU *cpu = env_archcpu(env);
758 int napot_bits = 0;
759 target_ulong napot_mask;
760
    /*
     * Check if we should use the background registers for the two
     * stage translation. We don't need to check if we actually need
     * two stage translation as that happened before this function
     * was called. Background registers will be used if the guest has
     * forced a two stage translation to be on (in HS or M mode).
     */
768 if (!riscv_cpu_virt_enabled(env) && two_stage) {
769 use_background = true;
770 }
771
    /* MPRV does not affect the virtual-machine load/store
       instructions, HLV, HLVX, and HSV. */
774 if (riscv_cpu_two_stage_lookup(mmu_idx)) {
775 mode = get_field(env->hstatus, HSTATUS_SPVP);
776 } else if (mode == PRV_M && access_type != MMU_INST_FETCH) {
777 if (get_field(env->mstatus, MSTATUS_MPRV)) {
778 mode = get_field(env->mstatus, MSTATUS_MPP);
779 }
780 }
781
782 if (first_stage == false) {
        /* We are in stage 2 translation, this is similar to stage 1. */
        /* Stage 2 is always taken as U-mode */
785 mode = PRV_U;
786 }
787
788 if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
789 *physical = addr;
790 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
791 return TRANSLATE_SUCCESS;
792 }
793
794 *prot = 0;
795
796 hwaddr base;
797 int levels, ptidxbits, ptesize, vm, sum, mxr, widened;
798
799 if (first_stage == true) {
800 mxr = get_field(env->mstatus, MSTATUS_MXR);
801 } else {
802 mxr = get_field(env->vsstatus, MSTATUS_MXR);
803 }
804
805 if (first_stage == true) {
806 if (use_background) {
807 if (riscv_cpu_mxl(env) == MXL_RV32) {
808 base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT;
809 vm = get_field(env->vsatp, SATP32_MODE);
810 } else {
811 base = (hwaddr)get_field(env->vsatp, SATP64_PPN) << PGSHIFT;
812 vm = get_field(env->vsatp, SATP64_MODE);
813 }
814 } else {
815 if (riscv_cpu_mxl(env) == MXL_RV32) {
816 base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT;
817 vm = get_field(env->satp, SATP32_MODE);
818 } else {
819 base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT;
820 vm = get_field(env->satp, SATP64_MODE);
821 }
822 }
823 widened = 0;
824 } else {
825 if (riscv_cpu_mxl(env) == MXL_RV32) {
826 base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT;
827 vm = get_field(env->hgatp, SATP32_MODE);
828 } else {
829 base = (hwaddr)get_field(env->hgatp, SATP64_PPN) << PGSHIFT;
830 vm = get_field(env->hgatp, SATP64_MODE);
831 }
832 widened = 2;
833 }
834
835 sum = get_field(env->mstatus, MSTATUS_SUM) || use_background || is_debug;
836 switch (vm) {
837 case VM_1_10_SV32:
838 levels = 2; ptidxbits = 10; ptesize = 4; break;
839 case VM_1_10_SV39:
840 levels = 3; ptidxbits = 9; ptesize = 8; break;
841 case VM_1_10_SV48:
842 levels = 4; ptidxbits = 9; ptesize = 8; break;
843 case VM_1_10_SV57:
844 levels = 5; ptidxbits = 9; ptesize = 8; break;
845 case VM_1_10_MBARE:
846 *physical = addr;
847 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
848 return TRANSLATE_SUCCESS;
849 default:
850 g_assert_not_reached();
851 }
852
853 CPUState *cs = env_cpu(env);
854 int va_bits = PGSHIFT + levels * ptidxbits + widened;
855 target_ulong mask, masked_msbs;
856
857 if (TARGET_LONG_BITS > (va_bits - 1)) {
858 mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
859 } else {
860 mask = 0;
861 }
862 masked_msbs = (addr >> (va_bits - 1)) & mask;
863
864 if (masked_msbs != 0 && masked_msbs != mask) {
865 return TRANSLATE_FAIL;
866 }
867
868 int ptshift = (levels - 1) * ptidxbits;
869 int i;
870
871#if !TCG_OVERSIZED_GUEST
872restart:
873#endif
874 for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
875 target_ulong idx;
876 if (i == 0) {
877 idx = (addr >> (PGSHIFT + ptshift)) &
878 ((1 << (ptidxbits + widened)) - 1);
879 } else {
880 idx = (addr >> (PGSHIFT + ptshift)) &
881 ((1 << ptidxbits) - 1);
882 }
883
        /* check that physical address of PTE is legal */
885 hwaddr pte_addr;
886
887 if (two_stage && first_stage) {
888 int vbase_prot;
889 hwaddr vbase;
890
            /* Do the second stage translation on the base PTE address. */
892 int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
893 base, NULL, MMU_DATA_LOAD,
894 mmu_idx, false, true,
895 is_debug);
896
897 if (vbase_ret != TRANSLATE_SUCCESS) {
898 if (fault_pte_addr) {
899 *fault_pte_addr = (base + idx * ptesize) >> 2;
900 }
901 return TRANSLATE_G_STAGE_FAIL;
902 }
903
904 pte_addr = vbase + idx * ptesize;
905 } else {
906 pte_addr = base + idx * ptesize;
907 }
908
909 int pmp_prot;
910 int pmp_ret = get_physical_address_pmp(env, &pmp_prot, NULL, pte_addr,
911 sizeof(target_ulong),
912 MMU_DATA_LOAD, PRV_S);
913 if (pmp_ret != TRANSLATE_SUCCESS) {
914 return TRANSLATE_PMP_FAIL;
915 }
916
917 target_ulong pte;
918 if (riscv_cpu_mxl(env) == MXL_RV32) {
919 pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
920 } else {
921 pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
922 }
923
924 if (res != MEMTX_OK) {
925 return TRANSLATE_FAIL;
926 }
927
928 if (riscv_cpu_sxl(env) == MXL_RV32) {
929 ppn = pte >> PTE_PPN_SHIFT;
930 } else if (cpu->cfg.ext_svpbmt || cpu->cfg.ext_svnapot) {
931 ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT;
932 } else {
933 ppn = pte >> PTE_PPN_SHIFT;
934 if ((pte & ~(target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT) {
935 return TRANSLATE_FAIL;
936 }
937 }
938
939 if (!(pte & PTE_V)) {
            /* Invalid PTE */
941 return TRANSLATE_FAIL;
942 } else if (!cpu->cfg.ext_svpbmt && (pte & PTE_PBMT)) {
943 return TRANSLATE_FAIL;
944 } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
            /* Inner PTE, continue walking */
946 if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) {
947 return TRANSLATE_FAIL;
948 }
949 base = ppn << PGSHIFT;
950 } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
            /* Reserved leaf PTE flags: PTE_W */
952 return TRANSLATE_FAIL;
953 } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
            /* Reserved leaf PTE flags: PTE_W + PTE_X */
955 return TRANSLATE_FAIL;
956 } else if ((pte & PTE_U) && ((mode != PRV_U) &&
957 (!sum || access_type == MMU_INST_FETCH))) {
            /* User PTE flags when not U mode and mstatus.SUM is not set,
               or the access type is an instruction fetch */
960 return TRANSLATE_FAIL;
961 } else if (!(pte & PTE_U) && (mode != PRV_S)) {
            /* Supervisor PTE flags when not S mode */
963 return TRANSLATE_FAIL;
964 } else if (ppn & ((1ULL << ptshift) - 1)) {
            /* Misaligned PPN */
966 return TRANSLATE_FAIL;
967 } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
968 ((pte & PTE_X) && mxr))) {
            /* Read access check failed */
970 return TRANSLATE_FAIL;
971 } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
            /* Write access check failed */
973 return TRANSLATE_FAIL;
974 } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
            /* Fetch access check failed */
976 return TRANSLATE_FAIL;
977 } else {
            /* if necessary, set accessed and dirty bits. */
979 target_ulong updated_pte = pte | PTE_A |
980 (access_type == MMU_DATA_STORE ? PTE_D : 0);
981
            /* Page table updates need to be atomic with MTTCG enabled */
983 if (updated_pte != pte) {
                /*
                 * - if accessed or dirty bits need updating, and the PTE is
                 *   in RAM, then we do so atomically with a compare and swap.
                 * - if the PTE is in IO space or ROM, then it can't be
                 *   updated and we return TRANSLATE_FAIL.
                 * - if the PTE changed by the time we went to update it, then
                 *   it is no longer valid and we must re-walk the page table.
                 */
992 MemoryRegion *mr;
993 hwaddr l = sizeof(target_ulong), addr1;
994 mr = address_space_translate(cs->as, pte_addr,
995 &addr1, &l, false, MEMTXATTRS_UNSPECIFIED);
996 if (memory_region_is_ram(mr)) {
997 target_ulong *pte_pa =
998 qemu_map_ram_ptr(mr->ram_block, addr1);
999#if TCG_OVERSIZED_GUEST
                    /* MTTCG is not enabled on oversized TCG guests so
                     * page table updates do not need to be atomic */
1002 *pte_pa = pte = updated_pte;
1003#else
1004 target_ulong old_pte =
1005 qatomic_cmpxchg(pte_pa, pte, updated_pte);
1006 if (old_pte != pte) {
1007 goto restart;
1008 } else {
1009 pte = updated_pte;
1010 }
1011#endif
1012 } else {
                    /* misconfigured PTE in ROM (AD bits are not preset) or
                     * PTE is in IO space and can't be updated atomically */
1015 return TRANSLATE_FAIL;
1016 }
1017 }
1018
            /* for superpage mappings, make a fake leaf PTE for the TLB's
               benefit. */
1021 target_ulong vpn = addr >> PGSHIFT;
1022
1023 if (cpu->cfg.ext_svnapot && (pte & PTE_N)) {
1024 napot_bits = ctzl(ppn) + 1;
1025 if ((i != (levels - 1)) || (napot_bits != 4)) {
1026 return TRANSLATE_FAIL;
1027 }
1028 }
1029
1030 napot_mask = (1 << napot_bits) - 1;
1031 *physical = (((ppn & ~napot_mask) | (vpn & napot_mask) |
1032 (vpn & (((target_ulong)1 << ptshift) - 1))
1033 ) << PGSHIFT) | (addr & ~TARGET_PAGE_MASK);
1034
            /* set permissions on the TLB entry */
1036 if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
1037 *prot |= PAGE_READ;
1038 }
1039 if ((pte & PTE_X)) {
1040 *prot |= PAGE_EXEC;
1041 }
            /* add write permission on stores or if the page is already dirty,
               so that we TLB miss on later writes to update the dirty bit */
1044 if ((pte & PTE_W) &&
1045 (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
1046 *prot |= PAGE_WRITE;
1047 }
1048 return TRANSLATE_SUCCESS;
1049 }
1050 }
1051 return TRANSLATE_FAIL;
1052}
1053
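/*
 * Convert a failed translation into the appropriate page-fault,
 * access-fault or guest-page-fault exception for the given access type.
 */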
1054static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
1055 MMUAccessType access_type, bool pmp_violation,
1056 bool first_stage, bool two_stage)
1057{
1058 CPUState *cs = env_cpu(env);
1059 int page_fault_exceptions, vm;
    uint64_t satp_mode;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        satp_mode = SATP32_MODE;
    } else {
        satp_mode = SATP64_MODE;
    }

    if (first_stage) {
        vm = get_field(env->satp, satp_mode);
    } else {
        vm = get_field(env->hgatp, satp_mode);
    }
1073
1074 page_fault_exceptions = vm != VM_1_10_MBARE && !pmp_violation;
1075
1076 switch (access_type) {
1077 case MMU_INST_FETCH:
1078 if (riscv_cpu_virt_enabled(env) && !first_stage) {
1079 cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
1080 } else {
1081 cs->exception_index = page_fault_exceptions ?
1082 RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
1083 }
1084 break;
1085 case MMU_DATA_LOAD:
1086 if (two_stage && !first_stage) {
1087 cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
1088 } else {
1089 cs->exception_index = page_fault_exceptions ?
1090 RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
1091 }
1092 break;
1093 case MMU_DATA_STORE:
1094 if (two_stage && !first_stage) {
1095 cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
1096 } else {
1097 cs->exception_index = page_fault_exceptions ?
1098 RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
1099 }
1100 break;
1101 default:
1102 g_assert_not_reached();
1103 }
1104 env->badaddr = address;
1105 env->two_stage_lookup = two_stage;
1106}
1107
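/* Debugger/gdbstub physical address lookup; returns -1 if unmapped. */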
1108hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
1109{
1110 RISCVCPU *cpu = RISCV_CPU(cs);
1111 CPURISCVState *env = &cpu->env;
1112 hwaddr phys_addr;
1113 int prot;
1114 int mmu_idx = cpu_mmu_index(&cpu->env, false);
1115
1116 if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
1117 true, riscv_cpu_virt_enabled(env), true)) {
1118 return -1;
1119 }
1120
1121 if (riscv_cpu_virt_enabled(env)) {
1122 if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
1123 0, mmu_idx, false, true, true)) {
1124 return -1;
1125 }
1126 }
1127
1128 return phys_addr & TARGET_PAGE_MASK;
1129}
1130
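/* Raise an access fault when a bus/device transaction fails. */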
1131void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
1132 vaddr addr, unsigned size,
1133 MMUAccessType access_type,
1134 int mmu_idx, MemTxAttrs attrs,
1135 MemTxResult response, uintptr_t retaddr)
1136{
1137 RISCVCPU *cpu = RISCV_CPU(cs);
1138 CPURISCVState *env = &cpu->env;
1139
1140 if (access_type == MMU_DATA_STORE) {
1141 cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
1142 } else if (access_type == MMU_DATA_LOAD) {
1143 cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
1144 } else {
1145 cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
1146 }
1147
1148 env->badaddr = addr;
1149 env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
1150 riscv_cpu_two_stage_lookup(mmu_idx);
1151 cpu_loop_exit_restore(cs, retaddr);
1152}
1153
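/* Raise an address-misaligned exception for unaligned accesses. */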
1154void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
1155 MMUAccessType access_type, int mmu_idx,
1156 uintptr_t retaddr)
1157{
1158 RISCVCPU *cpu = RISCV_CPU(cs);
1159 CPURISCVState *env = &cpu->env;
1160 switch (access_type) {
1161 case MMU_INST_FETCH:
1162 cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
1163 break;
1164 case MMU_DATA_LOAD:
1165 cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
1166 break;
1167 case MMU_DATA_STORE:
1168 cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
1169 break;
1170 default:
1171 g_assert_not_reached();
1172 }
1173 env->badaddr = addr;
1174 env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
1175 riscv_cpu_two_stage_lookup(mmu_idx);
1176 cpu_loop_exit_restore(cs, retaddr);
1177}
1178
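/*
 * TLB fill handler: translate the virtual address (one or two stages plus
 * PMP checks) and install the result in the softmmu TLB, or raise the
 * appropriate MMU exception.
 */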
1179bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
1180 MMUAccessType access_type, int mmu_idx,
1181 bool probe, uintptr_t retaddr)
1182{
1183 RISCVCPU *cpu = RISCV_CPU(cs);
1184 CPURISCVState *env = &cpu->env;
1185 vaddr im_address;
1186 hwaddr pa = 0;
1187 int prot, prot2, prot_pmp;
1188 bool pmp_violation = false;
1189 bool first_stage_error = true;
1190 bool two_stage_lookup = false;
1191 int ret = TRANSLATE_FAIL;
1192 int mode = mmu_idx;
1193
1194 target_ulong tlb_size = TARGET_PAGE_SIZE;
1195
1196 env->guest_phys_fault_addr = 0;
1197
1198 qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
1199 __func__, address, access_type, mmu_idx);
1200
    /* MPRV does not affect the virtual-machine load/store
       instructions, HLV, HLVX, and HSV. */
1203 if (riscv_cpu_two_stage_lookup(mmu_idx)) {
1204 mode = get_field(env->hstatus, HSTATUS_SPVP);
1205 } else if (mode == PRV_M && access_type != MMU_INST_FETCH &&
1206 get_field(env->mstatus, MSTATUS_MPRV)) {
1207 mode = get_field(env->mstatus, MSTATUS_MPP);
1208 if (riscv_has_ext(env, RVH) && get_field(env->mstatus, MSTATUS_MPV)) {
1209 two_stage_lookup = true;
1210 }
1211 }
1212
1213 if (riscv_cpu_virt_enabled(env) ||
1214 ((riscv_cpu_two_stage_lookup(mmu_idx) || two_stage_lookup) &&
1215 access_type != MMU_INST_FETCH)) {
        /* Two stage lookup */
1217 ret = get_physical_address(env, &pa, &prot, address,
1218 &env->guest_phys_fault_addr, access_type,
1219 mmu_idx, true, true, false);
1220
        /*
         * A G-stage exception may be triggered during two stage lookup.
         * And the env->guest_phys_fault_addr has already been set in
         * get_physical_address().
         */
1226 if (ret == TRANSLATE_G_STAGE_FAIL) {
1227 first_stage_error = false;
1228 access_type = MMU_DATA_LOAD;
1229 }
1230
1231 qemu_log_mask(CPU_LOG_MMU,
1232 "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
1233 TARGET_FMT_plx " prot %d\n",
1234 __func__, address, ret, pa, prot);
1235
1236 if (ret == TRANSLATE_SUCCESS) {
            /* Second stage lookup */
1238 im_address = pa;
1239
1240 ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
1241 access_type, mmu_idx, false, true,
1242 false);
1243
1244 qemu_log_mask(CPU_LOG_MMU,
1245 "%s 2nd-stage address=%" VADDR_PRIx " ret %d physical "
1246 TARGET_FMT_plx " prot %d\n",
1247 __func__, im_address, ret, pa, prot2);
1248
1249 prot &= prot2;
1250
1251 if (ret == TRANSLATE_SUCCESS) {
1252 ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
1253 size, access_type, mode);
1254
1255 qemu_log_mask(CPU_LOG_MMU,
1256 "%s PMP address=" TARGET_FMT_plx " ret %d prot"
1257 " %d tlb_size " TARGET_FMT_lu "\n",
1258 __func__, pa, ret, prot_pmp, tlb_size);
1259
1260 prot &= prot_pmp;
1261 }
1262
1263 if (ret != TRANSLATE_SUCCESS) {
                /*
                 * Guest physical address translation failed, this is a HS
                 * level exception
                 */
1268 first_stage_error = false;
1269 env->guest_phys_fault_addr = (im_address |
1270 (address &
1271 (TARGET_PAGE_SIZE - 1))) >> 2;
1272 }
1273 }
1274 } else {
        /* Single stage lookup */
1276 ret = get_physical_address(env, &pa, &prot, address, NULL,
1277 access_type, mmu_idx, true, false, false);
1278
1279 qemu_log_mask(CPU_LOG_MMU,
1280 "%s address=%" VADDR_PRIx " ret %d physical "
1281 TARGET_FMT_plx " prot %d\n",
1282 __func__, address, ret, pa, prot);
1283
1284 if (ret == TRANSLATE_SUCCESS) {
1285 ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
1286 size, access_type, mode);
1287
1288 qemu_log_mask(CPU_LOG_MMU,
1289 "%s PMP address=" TARGET_FMT_plx " ret %d prot"
1290 " %d tlb_size " TARGET_FMT_lu "\n",
1291 __func__, pa, ret, prot_pmp, tlb_size);
1292
1293 prot &= prot_pmp;
1294 }
1295 }
1296
1297 if (ret == TRANSLATE_PMP_FAIL) {
1298 pmp_violation = true;
1299 }
1300
1301 if (ret == TRANSLATE_SUCCESS) {
1302 tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
1303 prot, mmu_idx, tlb_size);
1304 return true;
1305 } else if (probe) {
1306 return false;
1307 } else {
1308 raise_mmu_exception(env, address, access_type, pmp_violation,
1309 first_stage_error,
1310 riscv_cpu_virt_enabled(env) ||
1311 riscv_cpu_two_stage_lookup(mmu_idx));
1312 cpu_loop_exit_restore(cs, retaddr);
1313 }
1314
1315 return true;
1316}
1317#endif
1318
/*
 * Handle Traps
 *
 * Adapted from Spike's processor_t::take_trap.
 *
 */
1325void riscv_cpu_do_interrupt(CPUState *cs)
1326{
1327#if !defined(CONFIG_USER_ONLY)
1328
1329 RISCVCPU *cpu = RISCV_CPU(cs);
1330 CPURISCVState *env = &cpu->env;
1331 bool write_gva = false;
1332 uint64_t s;
1333
    /* cs->exception is 32-bits wide unlike mcause which is XLEN-bits wide
     * so we mask off the MSB and separate into trap type and cause.
     */
1337 bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
1338 target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
1339 uint64_t deleg = async ? env->mideleg : env->medeleg;
1340 target_ulong tval = 0;
1341 target_ulong htval = 0;
1342 target_ulong mtval2 = 0;
1343
1344 if (cause == RISCV_EXCP_SEMIHOST) {
1345 if (env->priv >= PRV_S) {
1346 do_common_semihosting(cs);
1347 env->pc += 4;
1348 return;
1349 }
1350 cause = RISCV_EXCP_BREAKPOINT;
1351 }
1352
1353 if (!async) {
        /* set tval to badaddr for traps with address information */
1355 switch (cause) {
1356 case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
1357 case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
1358 case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
1359 case RISCV_EXCP_INST_ADDR_MIS:
1360 case RISCV_EXCP_INST_ACCESS_FAULT:
1361 case RISCV_EXCP_LOAD_ADDR_MIS:
1362 case RISCV_EXCP_STORE_AMO_ADDR_MIS:
1363 case RISCV_EXCP_LOAD_ACCESS_FAULT:
1364 case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
1365 case RISCV_EXCP_INST_PAGE_FAULT:
1366 case RISCV_EXCP_LOAD_PAGE_FAULT:
1367 case RISCV_EXCP_STORE_PAGE_FAULT:
1368 write_gva = env->two_stage_lookup;
1369 tval = env->badaddr;
1370 break;
1371 case RISCV_EXCP_ILLEGAL_INST:
1372 case RISCV_EXCP_VIRT_INSTRUCTION_FAULT:
1373 tval = env->bins;
1374 break;
1375 default:
1376 break;
1377 }
        /* ecall is dispatched as one cause so translate based on mode */
1379 if (cause == RISCV_EXCP_U_ECALL) {
1380 assert(env->priv <= 3);
1381
1382 if (env->priv == PRV_M) {
1383 cause = RISCV_EXCP_M_ECALL;
1384 } else if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) {
1385 cause = RISCV_EXCP_VS_ECALL;
1386 } else if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) {
1387 cause = RISCV_EXCP_S_ECALL;
1388 } else if (env->priv == PRV_U) {
1389 cause = RISCV_EXCP_U_ECALL;
1390 }
1391 }
1392 }
1393
1394 trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
1395 riscv_cpu_get_trap_name(cause, async));
1396
1397 qemu_log_mask(CPU_LOG_INT,
1398 "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
1399 "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
1400 __func__, env->mhartid, async, cause, env->pc, tval,
1401 riscv_cpu_get_trap_name(cause, async));
1402
1403 if (env->priv <= PRV_S &&
1404 cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
        /* handle the trap in S-mode */
1406 if (riscv_has_ext(env, RVH)) {
1407 uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
1408
1409 if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1)) {
                /*
                 * See if we need to adjust cause: yes if it is a VS-mode
                 * interrupt, no if the hypervisor has delegated one of
                 * HS-mode's interrupts.
                 */
1415 if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
1416 cause == IRQ_VS_EXT) {
1417 cause = cause - 1;
1418 }
1419 write_gva = false;
1420 } else if (riscv_cpu_virt_enabled(env)) {
                /* Trap into HS mode, from virt */
1422 riscv_cpu_swap_hypervisor_regs(env);
1423 env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
1424 env->priv);
1425 env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
1426 riscv_cpu_virt_enabled(env));
1427
1428
1429 htval = env->guest_phys_fault_addr;
1430
1431 riscv_cpu_set_virt_enabled(env, 0);
1432 } else {
                /* Trap into HS mode */
1434 env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
1435 htval = env->guest_phys_fault_addr;
1436 }
1437 env->hstatus = set_field(env->hstatus, HSTATUS_GVA, write_gva);
1438 }
1439
1440 s = env->mstatus;
1441 s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
1442 s = set_field(s, MSTATUS_SPP, env->priv);
1443 s = set_field(s, MSTATUS_SIE, 0);
1444 env->mstatus = s;
1445 env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
1446 env->sepc = env->pc;
1447 env->stval = tval;
1448 env->htval = htval;
1449 env->pc = (env->stvec >> 2 << 2) +
1450 ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
1451 riscv_cpu_set_mode(env, PRV_S);
1452 } else {
        /* handle the trap in M-mode */
1454 if (riscv_has_ext(env, RVH)) {
1455 if (riscv_cpu_virt_enabled(env)) {
1456 riscv_cpu_swap_hypervisor_regs(env);
1457 }
1458 env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
1459 riscv_cpu_virt_enabled(env));
1460 if (riscv_cpu_virt_enabled(env) && tval) {
1461 env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1);
1462 }
1463
1464 mtval2 = env->guest_phys_fault_addr;
1465
            /* Trapping to M mode, virt is disabled */
1467 riscv_cpu_set_virt_enabled(env, 0);
1468 }
1469
1470 s = env->mstatus;
1471 s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
1472 s = set_field(s, MSTATUS_MPP, env->priv);
1473 s = set_field(s, MSTATUS_MIE, 0);
1474 env->mstatus = s;
1475 env->mcause = cause | ~(((target_ulong)-1) >> async);
1476 env->mepc = env->pc;
1477 env->mtval = tval;
1478 env->mtval2 = mtval2;
1479 env->pc = (env->mtvec >> 2 << 2) +
1480 ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
1481 riscv_cpu_set_mode(env, PRV_M);
1482 }
1483
    /*
     * NOTE: it is not necessary to yield load reservations here. It is only
     * necessary for an SC from "another hart" to cause a load reservation
     * to be yielded. Refer to the memory consistency model section of the
     * RISC-V ISA Specification.
     */
1490 env->two_stage_lookup = false;
1491#endif
1492 cs->exception_index = RISCV_EXCP_NONE;
1493}
1494