1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#include "qemu/osdep.h"
21#include "qemu/log.h"
22#include "qemu/main-loop.h"
23#include "cpu.h"
24#include "internals.h"
25#include "pmu.h"
26#include "exec/cputlb.h"
27#include "exec/page-protection.h"
28#include "exec/target_page.h"
29#include "system/memory.h"
30#include "instmap.h"
31#include "tcg/tcg-op.h"
32#include "accel/tcg/cpu-ops.h"
33#include "trace.h"
34#include "semihosting/common-semi.h"
35#include "exec/icount.h"
36#include "cpu_bits.h"
37#include "debug.h"
38#include "pmp.h"
39
/*
 * Return the MMU index for the current access.
 *
 * The base index is the privilege level; MMU_2STAGE_BIT is OR-ed in when
 * the access goes through two-stage (guest) translation.  For data
 * accesses only: mstatus.MPRV redirects M-mode accesses to the MPP
 * privilege (and to the guest when MPV is also set), and an S-mode access
 * with the effective SUM bit set selects the dedicated S+SUM index.
 */
int riscv_env_mmu_index(CPURISCVState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return 0;
#else
    bool virt = env->virt_enabled;
    int mode = env->priv;

    /* Instruction fetches always use the current privilege/virt state. */
    if (!ifetch) {
        uint64_t status = env->mstatus;

        if (mode == PRV_M && get_field(status, MSTATUS_MPRV)) {
            /* MPRV: data accesses use the previous privilege (MPP/MPV). */
            mode = get_field(env->mstatus, MSTATUS_MPP);
            virt = get_field(env->mstatus, MSTATUS_MPV) &&
                   (mode != PRV_M);
            if (virt) {
                /* SUM must then be read from the guest's vsstatus. */
                status = env->vsstatus;
            }
        }
        if (mode == PRV_S && get_field(status, MSTATUS_SUM)) {
            mode = MMUIdx_S_SUM;
        }
    }

    return mode | (virt ? MMU_2STAGE_BIT : 0);
#endif
}
68
/*
 * Whether forward-edge CFI (Zicfilp landing pads) is enabled at the
 * current privilege level, based on the xenvcfg/mseccfg LPE bits.
 */
bool cpu_get_fcfien(CPURISCVState *env)
{
    /* No Zicfilp extension: forward CFI is never enabled. */
    if (!env_archcpu(env)->cfg.ext_zicfilp) {
        return false;
    }

    switch (env->priv) {
    case PRV_U:
        if (riscv_has_ext(env, RVS)) {
            return env->senvcfg & SENVCFG_LPE;
        }
        /* Without S-mode, U-mode is controlled directly by menvcfg. */
        return env->menvcfg & MENVCFG_LPE;
#ifndef CONFIG_USER_ONLY
    case PRV_S:
        if (env->virt_enabled) {
            /* Guest S-mode (VS) is gated by henvcfg. */
            return env->henvcfg & HENVCFG_LPE;
        }
        return env->menvcfg & MENVCFG_LPE;
    case PRV_M:
        return env->mseccfg & MSECCFG_MLPE;
#endif
    default:
        g_assert_not_reached();
    }
}
95
/*
 * Whether backward-edge CFI (Zicfiss shadow stack) is enabled at the
 * current privilege level.  M-mode has no shadow-stack enable bit and is
 * always reported disabled.
 */
bool cpu_get_bcfien(CPURISCVState *env)
{
    /* No Zicfiss extension: shadow stacks are never enabled. */
    if (!env_archcpu(env)->cfg.ext_zicfiss) {
        return false;
    }

    switch (env->priv) {
    case PRV_U:
        /*
         * U-mode is gated by senvcfg.SSE alone here; presumably the
         * no-S-mode case is handled by how senvcfg reads elsewhere --
         * TODO(review): confirm against the CSR implementation.
         */
        return env->senvcfg & SENVCFG_SSE;
#ifndef CONFIG_USER_ONLY
    case PRV_S:
        if (env->virt_enabled) {
            /* Guest S-mode (VS) is gated by henvcfg. */
            return env->henvcfg & HENVCFG_SSE;
        }
        return env->menvcfg & MENVCFG_SSE;
    case PRV_M:
        /* M-mode shadow stack is not supported. */
        return false;
#endif
    default:
        g_assert_not_reached();
    }
}
124
125bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt)
126{
127#ifdef CONFIG_USER_ONLY
128 return false;
129#else
130 if (virt) {
131 return (env->henvcfg & HENVCFG_DTE) != 0;
132 } else {
133 return (env->menvcfg & MENVCFG_DTE) != 0;
134 }
135#endif
136}
137
/*
 * Return the active pointer-masking mode (PMM field) for the current
 * effective address-translation privilege, honoring the Smmpm, Smnpm
 * and Ssnpm extensions.
 */
RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env)
{
#ifndef CONFIG_USER_ONLY
    int priv_mode = cpu_address_mode(env);

    /* MPRV accesses with MXR set are performed with masking disabled. */
    if (get_field(env->mstatus, MSTATUS_MPRV) &&
        get_field(env->mstatus, MSTATUS_MXR)) {
        return PMM_FIELD_DISABLED;
    }

    /* Select the PMM field for the effective privilege. */
    switch (priv_mode) {
    case PRV_M:
        if (riscv_cpu_cfg(env)->ext_smmpm) {
            return get_field(env->mseccfg, MSECCFG_PMM);
        }
        break;
    case PRV_S:
        if (riscv_cpu_cfg(env)->ext_smnpm) {
            /*
             * MPV set here means the MPRV-redirected access targets the
             * guest, so henvcfg applies -- TODO(review): confirm this is
             * the intended virt discriminator rather than virt_enabled.
             */
            if (get_field(env->mstatus, MSTATUS_MPV)) {
                return get_field(env->henvcfg, HENVCFG_PMM);
            } else {
                return get_field(env->menvcfg, MENVCFG_PMM);
            }
        }
        break;
    case PRV_U:
        if (riscv_has_ext(env, RVS)) {
            if (riscv_cpu_cfg(env)->ext_ssnpm) {
                return get_field(env->senvcfg, SENVCFG_PMM);
            }
        } else {
            /* No S-mode: U-mode pointer masking comes from menvcfg. */
            if (riscv_cpu_cfg(env)->ext_smnpm) {
                return get_field(env->menvcfg, MENVCFG_PMM);
            }
        }
        break;
    default:
        g_assert_not_reached();
    }
    return PMM_FIELD_DISABLED;
#else
    return PMM_FIELD_DISABLED;
#endif
}
183
184RISCVPmPmm riscv_pm_get_virt_pmm(CPURISCVState *env)
185{
186#ifndef CONFIG_USER_ONLY
187 int priv_mode = cpu_address_mode(env);
188
189 if (priv_mode == PRV_U) {
190 return get_field(env->hstatus, HSTATUS_HUPMM);
191 } else {
192 if (get_field(env->hstatus, HSTATUS_SPVP)) {
193 return get_field(env->henvcfg, HENVCFG_PMM);
194 } else {
195 return get_field(env->senvcfg, SENVCFG_PMM);
196 }
197 }
198#else
199 return PMM_FIELD_DISABLED;
200#endif
201}
202
203bool riscv_cpu_virt_mem_enabled(CPURISCVState *env)
204{
205#ifndef CONFIG_USER_ONLY
206 int satp_mode = 0;
207 int priv_mode = cpu_address_mode(env);
208
209 if (riscv_cpu_mxl(env) == MXL_RV32) {
210 satp_mode = get_field(env->satp, SATP32_MODE);
211 } else {
212 satp_mode = get_field(env->satp, SATP64_MODE);
213 }
214
215 return ((satp_mode != VM_1_10_MBARE) && (priv_mode != PRV_M));
216#else
217 return false;
218#endif
219}
220
221uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm)
222{
223 switch (pmm) {
224 case PMM_FIELD_DISABLED:
225 return 0;
226 case PMM_FIELD_PMLEN7:
227 return 7;
228 case PMM_FIELD_PMLEN16:
229 return 16;
230 default:
231 g_assert_not_reached();
232 }
233}
234
235#ifndef CONFIG_USER_ONLY
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
/*
 * Map an hviprio register slot to its local interrupt number, and record
 * whether that slot's priority field reads as zero (not programmable).
 */
static const int hviprio_index2irq[] = {
    0, 1, 4, 5, 8, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 };
static const int hviprio_index2rdzero[] = {
    1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
263
264int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero)
265{
266 if (index < 0 || ARRAY_SIZE(hviprio_index2irq) <= index) {
267 return -EINVAL;
268 }
269
270 if (out_irq) {
271 *out_irq = hviprio_index2irq[index];
272 }
273
274 if (out_rdzero) {
275 *out_rdzero = hviprio_index2rdzero[index];
276 }
277
278 return 0;
279}
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
/*
 * Default priority of each local interrupt, indexed by IRQ number.
 * A lower stored value means a higher default priority; entries left at
 * zero fall back to IPRIO_MMAXIPRIO in riscv_cpu_default_priority().
 */
static const uint8_t default_iprio[64] = {
    /* Custom interrupts 48 to 63: lowest default priority */
    [63] = IPRIO_MMAXIPRIO,
    [62] = IPRIO_MMAXIPRIO,
    [61] = IPRIO_MMAXIPRIO,
    [60] = IPRIO_MMAXIPRIO,
    [59] = IPRIO_MMAXIPRIO,
    [58] = IPRIO_MMAXIPRIO,
    [57] = IPRIO_MMAXIPRIO,
    [56] = IPRIO_MMAXIPRIO,
    [55] = IPRIO_MMAXIPRIO,
    [54] = IPRIO_MMAXIPRIO,
    [53] = IPRIO_MMAXIPRIO,
    [52] = IPRIO_MMAXIPRIO,
    [51] = IPRIO_MMAXIPRIO,
    [50] = IPRIO_MMAXIPRIO,
    [49] = IPRIO_MMAXIPRIO,
    [48] = IPRIO_MMAXIPRIO,

    /* Custom interrupts 24 to 31: lowest default priority */
    [31] = IPRIO_MMAXIPRIO,
    [30] = IPRIO_MMAXIPRIO,
    [29] = IPRIO_MMAXIPRIO,
    [28] = IPRIO_MMAXIPRIO,
    [27] = IPRIO_MMAXIPRIO,
    [26] = IPRIO_MMAXIPRIO,
    [25] = IPRIO_MMAXIPRIO,
    [24] = IPRIO_MMAXIPRIO,

    /* Interrupts 40-47 and 20-23: the "upper" band */
    [47] = IPRIO_DEFAULT_UPPER,
    [23] = IPRIO_DEFAULT_UPPER + 1,
    [46] = IPRIO_DEFAULT_UPPER + 2,
    [45] = IPRIO_DEFAULT_UPPER + 3,
    [22] = IPRIO_DEFAULT_UPPER + 4,
    [44] = IPRIO_DEFAULT_UPPER + 5,

    [43] = IPRIO_DEFAULT_UPPER + 6,
    [21] = IPRIO_DEFAULT_UPPER + 7,
    [42] = IPRIO_DEFAULT_UPPER + 8,
    [41] = IPRIO_DEFAULT_UPPER + 9,
    [20] = IPRIO_DEFAULT_UPPER + 10,
    [40] = IPRIO_DEFAULT_UPPER + 11,

    /* Standard M-level: external (11), software (3), timer (7) */
    [11] = IPRIO_DEFAULT_M,
    [3] = IPRIO_DEFAULT_M + 1,
    [7] = IPRIO_DEFAULT_M + 2,

    /* Standard S-level: external (9), software (1), timer (5) */
    [9] = IPRIO_DEFAULT_S,
    [1] = IPRIO_DEFAULT_S + 1,
    [5] = IPRIO_DEFAULT_S + 2,

    /* Supervisor guest external interrupt */
    [12] = IPRIO_DEFAULT_SGEXT,

    /* Standard VS-level: external (10), software (2), timer (6) */
    [10] = IPRIO_DEFAULT_VS,
    [2] = IPRIO_DEFAULT_VS + 1,
    [6] = IPRIO_DEFAULT_VS + 2,

    /* Interrupts 32-39 and 16-19: the "lower" band */
    [39] = IPRIO_DEFAULT_LOWER,
    [19] = IPRIO_DEFAULT_LOWER + 1,
    [38] = IPRIO_DEFAULT_LOWER + 2,
    [37] = IPRIO_DEFAULT_LOWER + 3,
    [18] = IPRIO_DEFAULT_LOWER + 4,
    [36] = IPRIO_DEFAULT_LOWER + 5,

    [35] = IPRIO_DEFAULT_LOWER + 6,
    [17] = IPRIO_DEFAULT_LOWER + 7,
    [34] = IPRIO_DEFAULT_LOWER + 8,
    [33] = IPRIO_DEFAULT_LOWER + 9,
    [16] = IPRIO_DEFAULT_LOWER + 10,
    [32] = IPRIO_DEFAULT_LOWER + 11,
};
372
373uint8_t riscv_cpu_default_priority(int irq)
374{
375 if (irq < 0 || irq > 63) {
376 return IPRIO_MMAXIPRIO;
377 }
378
379 return default_iprio[irq] ? default_iprio[irq] : IPRIO_MMAXIPRIO;
380};
381
/*
 * Pick the interrupt to take from a pending-and-enabled mask.
 *
 * Without AIA (Smaia for M level, Ssaia for S level) the lowest-numbered
 * pending IRQ wins.  With AIA, each IRQ's priority is read from the
 * iprio array; a zero entry means "default": extirq_def_prio for the
 * external IRQ itself, otherwise a value derived from the architectural
 * default ordering relative to the external IRQ.  Returns the chosen IRQ
 * number or RISCV_EXCP_NONE when nothing is pending.
 */
static int riscv_cpu_pending_to_irq(CPURISCVState *env,
                                    int extirq, unsigned int extirq_def_prio,
                                    uint64_t pending, uint8_t *iprio)
{
    int irq, best_irq = RISCV_EXCP_NONE;
    unsigned int prio, best_prio = UINT_MAX;

    if (!pending) {
        return RISCV_EXCP_NONE;
    }

    irq = ctz64(pending);
    if (!((extirq == IRQ_M_EXT) ? riscv_cpu_cfg(env)->ext_smaia :
                                  riscv_cpu_cfg(env)->ext_ssaia)) {
        /* AIA is not enabled: lowest-numbered pending IRQ wins. */
        return irq;
    }

    /* Scan upward from the first pending bit. */
    pending = pending >> irq;
    while (pending) {
        prio = iprio[irq];
        if (!prio) {
            if (irq == extirq) {
                prio = extirq_def_prio;
            } else {
                /* Fold the default ordering around the external IRQ. */
                prio = (riscv_cpu_default_priority(irq) < extirq_def_prio) ?
                       1 : IPRIO_MMAXIPRIO;
            }
        }
        /* '<=': among equal priorities the highest-numbered IRQ wins. */
        if ((pending & 0x1) && (prio <= best_prio)) {
            best_irq = irq;
            best_prio = prio;
        }
        irq++;
        pending = pending >> 1;
    }

    return best_irq;
}
420
421
422
423
424
425
426uint64_t riscv_cpu_all_pending(CPURISCVState *env)
427{
428 uint32_t gein = get_field(env->hstatus, HSTATUS_VGEIN);
429 uint64_t vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
430 uint64_t vstip = (env->vstime_irq) ? MIP_VSTIP : 0;
431
432 return (env->mip | vsgein | vstip) & env->mie;
433}
434
435int riscv_cpu_mirq_pending(CPURISCVState *env)
436{
437 uint64_t irqs = riscv_cpu_all_pending(env) & ~env->mideleg &
438 ~(MIP_SGEIP | MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
439
440 return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
441 irqs, env->miprio);
442}
443
444int riscv_cpu_sirq_pending(CPURISCVState *env)
445{
446 uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
447 ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
448 uint64_t irqs_f = env->mvip & env->mvien & ~env->mideleg & env->sie;
449
450 return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
451 irqs | irqs_f, env->siprio);
452}
453
454int riscv_cpu_vsirq_pending(CPURISCVState *env)
455{
456 uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg & env->hideleg;
457 uint64_t irqs_f_vs = env->hvip & env->hvien & ~env->hideleg & env->vsie;
458 uint64_t vsbits;
459
460
461 vsbits = irqs & VS_MODE_INTERRUPTS;
462 irqs &= ~VS_MODE_INTERRUPTS;
463 irqs |= vsbits >> 1;
464
465 return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
466 (irqs | irqs_f_vs), env->hviprio);
467}
468
/*
 * Return the highest-priority local interrupt that can be taken in the
 * current privilege/virtualization state, or RISCV_EXCP_NONE.
 *
 * Evaluation order: RNMI (when Smrnmi is present), then M-level, then
 * HS-level, then VS-level, each gated by the relevant xIE enables.
 */
static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
    uint64_t irqs, pending, mie, hsie, vsie, irqs_f, irqs_f_vs;
    uint64_t vsbits, irq_delegated;
    int virq;

    /* Priority: RNMI above all other interrupts. */
    if (riscv_cpu_cfg(env)->ext_smrnmi) {
        /* mnstatus.NMIE == 0 masks every interrupt. */
        if (!get_field(env->mnstatus, MNSTATUS_NMIE)) {
            return RISCV_EXCP_NONE;
        }

        if (env->rnmip) {
            return ctz64(env->rnmip); /* non-zero, so ctz64 is an IRQ */
        }
    }

    /* Determine the interrupt-enable state of each privilege level. */
    if (env->virt_enabled) {
        /* While in the guest, M and HS interrupts are always enabled. */
        mie = 1;
        hsie = 1;
        vsie = (env->priv < PRV_S) ||
               (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
    } else {
        mie = (env->priv < PRV_M) ||
              (env->priv == PRV_M && get_field(env->mstatus, MSTATUS_MIE));
        hsie = (env->priv < PRV_S) ||
               (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
        vsie = 0;
    }

    /* All pending-and-enabled interrupts. */
    pending = riscv_cpu_all_pending(env);

    /* M-level interrupts first (-mie is an all-ones/all-zeros mask). */
    irqs = pending & ~env->mideleg & -mie;
    if (irqs) {
        return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
                                        irqs, env->miprio);
    }

    /* Virtual S-level interrupts injected via mvip/mvien. */
    irqs_f = env->mvip & (env->mvien & ~env->mideleg) & env->sie;

    /* HS-level interrupts: delegated to S but not to VS. */
    irqs = ((pending & env->mideleg & ~env->hideleg) | irqs_f) & -hsie;
    if (irqs) {
        return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                        irqs, env->siprio);
    }

    /* Virtual VS-level interrupts injected via hvip/hvien. */
    irqs_f_vs = env->hvip & env->hvien & ~env->hideleg & env->vsie;

    /* Interrupts delegated all the way to VS. */
    irq_delegated = pending & env->mideleg & env->hideleg;

    /* Shift VS-mode bits down to their S-mode positions. */
    vsbits = irq_delegated & VS_MODE_INTERRUPTS;
    irq_delegated &= ~VS_MODE_INTERRUPTS;
    irq_delegated |= vsbits >> 1;

    irqs = (irq_delegated | irqs_f_vs) & -vsie;
    if (irqs) {
        virq = riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                        irqs, env->hviprio);
        if (virq <= 0 || (virq > 12 && virq <= 63)) {
            return virq;
        } else {
            /* Translate S-level IRQ numbers back to VS positions. */
            return virq + 1;
        }
    }

    /* Nothing pending at any level. */
    return RISCV_EXCP_NONE;
}
546
547bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
548{
549 uint32_t mask = CPU_INTERRUPT_HARD | CPU_INTERRUPT_RNMI;
550
551 if (interrupt_request & mask) {
552 RISCVCPU *cpu = RISCV_CPU(cs);
553 CPURISCVState *env = &cpu->env;
554 int interruptno = riscv_cpu_local_irq_pending(env);
555 if (interruptno >= 0) {
556 cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
557 riscv_cpu_do_interrupt(cs);
558 return true;
559 }
560 }
561 return false;
562}
563
564
565bool riscv_cpu_fp_enabled(CPURISCVState *env)
566{
567 if (env->mstatus & MSTATUS_FS) {
568 if (env->virt_enabled && !(env->mstatus_hs & MSTATUS_FS)) {
569 return false;
570 }
571 return true;
572 }
573
574 return false;
575}
576
577
578bool riscv_cpu_vector_enabled(CPURISCVState *env)
579{
580 if (env->mstatus & MSTATUS_VS) {
581 if (env->virt_enabled && !(env->mstatus_hs & MSTATUS_VS)) {
582 return false;
583 }
584 return true;
585 }
586
587 return false;
588}
589
/*
 * Swap the S-mode CSR state between the host (HS) and guest (VS) views.
 *
 * The direction is taken from env->virt_enabled: when currently in the
 * guest, guest state is saved into the vs* copies and host state is
 * restored from the *_hs copies, and vice versa.  Only the bits in
 * mstatus_mask are exchanged in mstatus.
 */
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
{
    uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM |
                            MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
                            MSTATUS64_UXL | MSTATUS_VS;

    if (riscv_has_ext(env, RVF)) {
        mstatus_mask |= MSTATUS_FS;
    }
    bool current_virt = env->virt_enabled;

    /*
     * When Zicfilp is active for the guest (henvcfg.LPE), the previous
     * landing-pad state (SPELP) is swapped as part of sstatus.
     */
    if (env_archcpu(env)->cfg.ext_zicfilp &&
        get_field(env->henvcfg, HENVCFG_LPE)) {
        mstatus_mask |= SSTATUS_SPELP;
    }

    g_assert(riscv_has_ext(env, RVH));

    /* S-mode double-trap state (SDT) is swapped only when DTE is on. */
    if (riscv_env_smode_dbltrp_enabled(env, current_virt)) {
        mstatus_mask |= MSTATUS_SDT;
    }

    if (current_virt) {
        /* Current V=1: save guest state, restore host state. */
        env->vsstatus = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->mstatus_hs;

        env->vstvec = env->stvec;
        env->stvec = env->stvec_hs;

        env->vsscratch = env->sscratch;
        env->sscratch = env->sscratch_hs;

        env->vsepc = env->sepc;
        env->sepc = env->sepc_hs;

        env->vscause = env->scause;
        env->scause = env->scause_hs;

        env->vstval = env->stval;
        env->stval = env->stval_hs;

        env->vsatp = env->satp;
        env->satp = env->satp_hs;
    } else {
        /* Current V=0: save host state, restore guest state. */
        env->mstatus_hs = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->vsstatus;

        env->stvec_hs = env->stvec;
        env->stvec = env->vstvec;

        env->sscratch_hs = env->sscratch;
        env->sscratch = env->vsscratch;

        env->sepc_hs = env->sepc;
        env->sepc = env->vsepc;

        env->scause_hs = env->scause;
        env->scause = env->vscause;

        env->stval_hs = env->stval;
        env->stval = env->vstval;

        env->satp_hs = env->satp;
        env->satp = env->vsatp;
    }
}
664
665target_ulong riscv_cpu_get_geilen(CPURISCVState *env)
666{
667 if (!riscv_has_ext(env, RVH)) {
668 return 0;
669 }
670
671 return env->geilen;
672}
673
674void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
675{
676 if (!riscv_has_ext(env, RVH)) {
677 return;
678 }
679
680 if (geilen > (TARGET_LONG_BITS - 1)) {
681 return;
682 }
683
684 env->geilen = geilen;
685}
686
687void riscv_cpu_set_rnmi(RISCVCPU *cpu, uint32_t irq, bool level)
688{
689 CPURISCVState *env = &cpu->env;
690 CPUState *cs = CPU(cpu);
691 bool release_lock = false;
692
693 if (!bql_locked()) {
694 release_lock = true;
695 bql_lock();
696 }
697
698 if (level) {
699 env->rnmip |= 1 << irq;
700 cpu_interrupt(cs, CPU_INTERRUPT_RNMI);
701 } else {
702 env->rnmip &= ~(1 << irq);
703 cpu_reset_interrupt(cs, CPU_INTERRUPT_RNMI);
704 }
705
706 if (release_lock) {
707 bql_unlock();
708 }
709}
710
711int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
712{
713 CPURISCVState *env = &cpu->env;
714 if (env->miclaim & interrupts) {
715 return -1;
716 } else {
717 env->miclaim |= interrupts;
718 return 0;
719 }
720}
721
722void riscv_cpu_interrupt(CPURISCVState *env)
723{
724 uint64_t gein, vsgein = 0, vstip = 0, irqf = 0;
725 CPUState *cs = env_cpu(env);
726
727 BQL_LOCK_GUARD();
728
729 if (env->virt_enabled) {
730 gein = get_field(env->hstatus, HSTATUS_VGEIN);
731 vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
732 irqf = env->hvien & env->hvip & env->vsie;
733 } else {
734 irqf = env->mvien & env->mvip & env->sie;
735 }
736
737 vstip = env->vstime_irq ? MIP_VSTIP : 0;
738
739 if (env->mip | vsgein | vstip | irqf) {
740 cpu_interrupt(cs, CPU_INTERRUPT_HARD);
741 } else {
742 cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
743 }
744}
745
/*
 * Read-modify-write mip under the BQL, then re-evaluate the interrupt
 * line.  Returns the previous mip value.
 */
uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask, uint64_t value)
{
    uint64_t old = env->mip;

    /*
     * A write targeting only VSTIP is ignored while the vstimecmp timer
     * interrupt is asserted: VSTIP is synthesized from vstime_irq in
     * riscv_cpu_all_pending()/riscv_cpu_interrupt(), not stored in mip.
     */
    mask = ((mask == MIP_VSTIP) && env->vstime_irq) ? 0 : mask;

    BQL_LOCK_GUARD();

    env->mip = (env->mip & ~mask) | (value & mask);

    riscv_cpu_interrupt(env);

    return old;
}
761
/*
 * Install the callback used to read the current time value; 'arg' is
 * passed back to 'fn' on each invocation.  NOTE(review): the consumers
 * of rdtime_fn are outside this file — confirm semantics at call sites.
 */
void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
                             void *arg)
{
    env->rdtime_fn = fn;
    env->rdtime_fn_arg = arg;
}
768
/*
 * Install the AIA interrupt-register read-modify-write callback (and its
 * opaque argument) for one privilege level.  Levels above PRV_M are
 * silently ignored.
 */
void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
                                   int (*rmw_fn)(void *arg,
                                                 target_ulong reg,
                                                 target_ulong *val,
                                                 target_ulong new_val,
                                                 target_ulong write_mask),
                                   void *rmw_fn_arg)
{
    if (priv <= PRV_M) {
        env->aia_ireg_rmw_fn[priv] = rmw_fn;
        env->aia_ireg_rmw_fn_arg[priv] = rmw_fn_arg;
    }
}
782
/*
 * Freeze the Control Transfer Records buffer if the given freeze cause
 * is enabled in the active control register (vsctrctl while virtualized,
 * mctrctl otherwise).
 */
static void riscv_ctr_freeze(CPURISCVState *env, uint64_t freeze_mask,
                             bool virt)
{
    uint64_t ctl = virt ? env->vsctrctl : env->mctrctl;

    /* Only the two architectural freeze causes are legal here. */
    assert((freeze_mask & (~(XCTRCTL_BPFRZ | XCTRCTL_LCOFIFRZ))) == 0);

    if (ctl & freeze_mask) {
        env->sctrstatus |= SCTRSTATUS_FROZEN;
    }
}
794
/* Wipe all Control Transfer Records entries (source, target, metadata). */
void riscv_ctr_clear(CPURISCVState *env)
{
    memset(env->ctr_src, 0x0, sizeof(env->ctr_src));
    memset(env->ctr_dst, 0x0, sizeof(env->ctr_dst));
    memset(env->ctr_data, 0x0, sizeof(env->ctr_data));
}
801
802static uint64_t riscv_ctr_priv_to_mask(target_ulong priv, bool virt)
803{
804 switch (priv) {
805 case PRV_M:
806 return MCTRCTL_M;
807 case PRV_S:
808 if (virt) {
809 return XCTRCTL_S;
810 }
811 return XCTRCTL_S;
812 case PRV_U:
813 if (virt) {
814 return XCTRCTL_U;
815 }
816 return XCTRCTL_U;
817 }
818
819 g_assert_not_reached();
820}
821
/*
 * Return the CTR control register governing the given privilege:
 * vsctrctl for virtualized S/U modes, mctrctl otherwise (M always uses
 * mctrctl).
 */
static uint64_t riscv_ctr_get_control(CPURISCVState *env, target_long priv,
                                      bool virt)
{
    switch (priv) {
    case PRV_M:
        return env->mctrctl;
    case PRV_S:
    case PRV_U:
        if (virt) {
            return env->vsctrctl;
        }
        return env->mctrctl;
    }

    g_assert_not_reached();
}
838
839
840
841
842
843
/*
 * Check the external-trap enable (xTE) bits that decide whether a CTR
 * record may be written for a trap that leaves the source privilege.
 * Returns true when recording the external trap is permitted.
 */
static bool riscv_ctr_check_xte(CPURISCVState *env, target_long src_prv,
                                bool src_virt)
{
    target_long tgt_prv = env->priv;
    bool res = true;

    /*
     * Treat (V)S as U for the enable-chain walk below: the guest's
     * S-mode sits logically below the hypervisor's S-mode.
     */
    if (src_virt && src_prv == PRV_S) {
        src_prv = PRV_U;
    } else if (env->virt_enabled && tgt_prv == PRV_S) {
        tgt_prv = PRV_U;
    }

    /* Traps out of guest U/S additionally require vsctrctl.STE. */
    if (src_virt && src_prv == PRV_U) {
        res &= !!(env->vsctrctl & XCTRCTL_STE);
    }

    /* Walk up the chain, AND-ing each crossed enable bit. */
    switch (src_prv) {
    case PRV_U:
        if (tgt_prv == PRV_U) {
            break;
        }
        res &= !!(env->mctrctl & XCTRCTL_STE);
        /* fallthrough */
    case PRV_S:
        if (tgt_prv == PRV_S) {
            break;
        }
        res &= !!(env->mctrctl & MCTRCTL_MTE);
        /* fallthrough */
    case PRV_M:
        break;
    }

    return res;
}
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
/*
 * Record one control-transfer event in the CTR circular buffer.
 *
 * src/dst are the source and target addresses; type classifies the
 * transfer (call, return, exception, ...); src_priv/src_virt describe
 * the privilege the transfer originated from (the target privilege is
 * the CPU's current one).  Depending on the per-privilege enables,
 * RASEMU mode and the inhibit bits, the event may be dropped, partially
 * recorded (src or dst forced to zero), or may pop an entry instead of
 * pushing one.
 */
void riscv_ctr_add_entry(CPURISCVState *env, target_long src, target_long dst,
                         enum CTRType type, target_ulong src_priv, bool src_virt)
{
    bool tgt_virt = env->virt_enabled;
    uint64_t src_mask = riscv_ctr_priv_to_mask(src_priv, src_virt);
    uint64_t tgt_mask = riscv_ctr_priv_to_mask(env->priv, tgt_virt);
    uint64_t src_ctrl = riscv_ctr_get_control(env, src_priv, src_virt);
    uint64_t tgt_ctrl = riscv_ctr_get_control(env, env->priv, tgt_virt);
    uint64_t depth, head;
    bool ext_trap = false;

    /*
     * Nothing to record when both the source and target privileges have
     * recording disabled, or while the buffer is frozen.
     */
    if ((!(src_ctrl & src_mask) && !(tgt_ctrl & tgt_mask)) ||
        env->sctrstatus & SCTRSTATUS_FROZEN) {
        return;
    }

    /*
     * In RASEMU mode only call/return-style transfers are tracked.
     */
    if (tgt_ctrl & XCTRCTL_RASEMU &&
        type != CTRDATA_TYPE_INDIRECT_CALL &&
        type != CTRDATA_TYPE_DIRECT_CALL &&
        type != CTRDATA_TYPE_RETURN &&
        type != CTRDATA_TYPE_CO_ROUTINE_SWAP) {
        return;
    }

    if (type == CTRDATA_TYPE_EXCEPTION || type == CTRDATA_TYPE_INTERRUPT) {
        /* Hide whichever side of the trap has recording disabled. */
        if (!(src_ctrl & src_mask)) {
            src = 0;
        } else if (!(tgt_ctrl & tgt_mask)) {
            /* External trap: target disabled, subject to xTE enables. */
            if (!riscv_ctr_check_xte(env, src_priv, src_virt)) {
                return;
            }

            ext_trap = true;
            dst = 0;
        }
    } else if (type == CTRDATA_TYPE_EXCEP_INT_RET) {
        /*
         * Trap returns are recorded only when the source privilege has
         * recording enabled; a disabled target just hides the target PC.
         */
        if (!(src_ctrl & src_mask)) {
            return;
        }

        /* Zero the target address when target recording is disabled. */
        if (!(tgt_ctrl & tgt_mask)) {
            dst = 0;
        }
    }

    /* Per-type inhibit bits; bypassed for RASEMU and external traps. */
    if (!(tgt_ctrl & XCTRCTL_RASEMU) && !ext_trap) {
        /*
         * Note the inverted sense for not-taken branches: they are
         * recorded only when their inhibit bit IS set.
         */
        bool check = tgt_ctrl & BIT_ULL(type + XCTRCTL_INH_START);
        if ((type == CTRDATA_TYPE_NONTAKEN_BRANCH && !check) ||
            (type != CTRDATA_TYPE_NONTAKEN_BRANCH && check)) {
            return;
        }
    }

    head = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);

    depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
    if (tgt_ctrl & XCTRCTL_RASEMU && type == CTRDATA_TYPE_RETURN) {
        /* RASEMU return: pop the top entry instead of pushing one. */
        head = (head - 1) & (depth - 1);

        env->ctr_src[head] &= ~CTRSOURCE_VALID;
        env->sctrstatus =
            set_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK, head);
        return;
    }

    /* RASEMU co-routine swap: overwrite the top entry (pop then push). */
    if (tgt_ctrl & XCTRCTL_RASEMU && type == CTRDATA_TYPE_CO_ROUTINE_SWAP) {
        head = (head - 1) & (depth - 1);
    }

    env->ctr_src[head] = src | CTRSOURCE_VALID;
    env->ctr_dst[head] = dst & ~CTRTARGET_MISP;
    env->ctr_data[head] = set_field(0, CTRDATA_TYPE_MASK, type);

    head = (head + 1) & (depth - 1);

    env->sctrstatus = set_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK, head);
}
1030
/*
 * Switch the CPU to a new privilege level and virtualization state.
 *
 * On an actual change this updates the instruction trigger (icount) and
 * PMU fixed counters.  It always recomputes the effective XLEN and
 * clears any LR/SC reservation.  With RVH, a change in virtualization
 * flushes the TLB, and entering the guest re-evaluates pending
 * interrupts.
 */
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
{
    g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED);

    if (newpriv != env->priv || env->virt_enabled != virt_en) {
        if (icount_enabled()) {
            riscv_itrigger_update_priv(env);
        }

        riscv_pmu_update_fixed_ctrs(env, newpriv, virt_en);
    }

    /* No TLB flush needed for privilege alone: it is part of mmu_idx. */
    env->priv = newpriv;
    env->xl = cpu_recompute_xl(env);

    /*
     * Clear the load reservation - otherwise a reservation placed in one
     * context/process could be used by another, letting an SC succeed
     * incorrectly.  NOTE(review): ISA spec v2.2 is understood to require
     * this on privilege change - confirm against the current spec text.
     */
    env->load_res = -1;

    if (riscv_has_ext(env, RVH)) {
        /* Flush the TLB on all virtualization-state changes. */
        if (env->virt_enabled != virt_en) {
            tlb_flush(env_cpu(env));
        }

        env->virt_enabled = virt_en;
        if (virt_en) {
            /*
             * Interrupt state visible to the guest (e.g. hgeip/VGEIN
             * derived bits) may have changed while V=0; re-evaluate the
             * interrupt lines now that we are entering the guest.
             */
            riscv_cpu_update_mip(env, 0, 0);
        }
    }
}
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092static int get_physical_address_pmp(CPURISCVState *env, int *prot, hwaddr addr,
1093 int size, MMUAccessType access_type,
1094 int mode)
1095{
1096 pmp_priv_t pmp_priv;
1097 bool pmp_has_privs;
1098
1099 if (!riscv_cpu_cfg(env)->pmp) {
1100 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1101 return TRANSLATE_SUCCESS;
1102 }
1103
1104 pmp_has_privs = pmp_hart_has_privs(env, addr, size, 1 << access_type,
1105 &pmp_priv, mode);
1106 if (!pmp_has_privs) {
1107 *prot = 0;
1108 return TRANSLATE_PMP_FAIL;
1109 }
1110
1111 *prot = pmp_priv_to_page_prot(pmp_priv);
1112
1113 return TRANSLATE_SUCCESS;
1114}
1115
1116
1117static bool do_svukte_check(CPURISCVState *env, bool first_stage,
1118 int mode, bool virt)
1119{
1120
1121 if (!(env_archcpu(env)->cfg.ext_svukte ||
1122 !first_stage ||
1123 VM_1_10_SV39 != get_field(env->satp, SATP64_MODE))) {
1124 return false;
1125 }
1126
1127
1128
1129
1130
1131
1132 if (env->priv == PRV_U && !env->virt_enabled && virt) {
1133 if (!get_field(env->hstatus, HSTATUS_HUKTE)) {
1134 return false;
1135 }
1136 } else if (!get_field(env->senvcfg, SENVCFG_UKTE)) {
1137 return false;
1138 }
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150 if (mode != PRV_U) {
1151 return false;
1152 }
1153
1154 return true;
1155}
1156
1157static bool check_svukte_addr(CPURISCVState *env, vaddr addr)
1158{
1159
1160 uint32_t sxlen = 32 * riscv_cpu_sxl(env);
1161 uint64_t high_bit = addr & (1UL << (sxlen - 1));
1162 return !high_bit;
1163}
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187static int get_physical_address(CPURISCVState *env, hwaddr *physical,
1188 int *ret_prot, vaddr addr,
1189 target_ulong *fault_pte_addr,
1190 int access_type, int mmu_idx,
1191 bool first_stage, bool two_stage,
1192 bool is_debug, bool is_probe)
1193{
1194
1195
1196
1197
1198
1199 MemTxResult res;
1200 MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
1201 int mode = mmuidx_priv(mmu_idx);
1202 bool virt = mmuidx_2stage(mmu_idx);
1203 bool use_background = false;
1204 hwaddr ppn;
1205 int napot_bits = 0;
1206 target_ulong napot_mask;
1207 bool is_sstack_idx = ((mmu_idx & MMU_IDX_SS_WRITE) == MMU_IDX_SS_WRITE);
1208 bool sstack_page = false;
1209
1210 if (do_svukte_check(env, first_stage, mode, virt) &&
1211 !check_svukte_addr(env, addr)) {
1212 return TRANSLATE_FAIL;
1213 }
1214
1215
1216
1217
1218
1219
1220
1221
1222 if (!env->virt_enabled && two_stage) {
1223 use_background = true;
1224 }
1225
1226 if (mode == PRV_M || !riscv_cpu_cfg(env)->mmu) {
1227 *physical = addr;
1228 *ret_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1229 return TRANSLATE_SUCCESS;
1230 }
1231
1232 *ret_prot = 0;
1233
1234 hwaddr base;
1235 int levels, ptidxbits, ptesize, vm, widened;
1236
1237 if (first_stage == true) {
1238 if (use_background) {
1239 if (riscv_cpu_mxl(env) == MXL_RV32) {
1240 base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT;
1241 vm = get_field(env->vsatp, SATP32_MODE);
1242 } else {
1243 base = (hwaddr)get_field(env->vsatp, SATP64_PPN) << PGSHIFT;
1244 vm = get_field(env->vsatp, SATP64_MODE);
1245 }
1246 } else {
1247 if (riscv_cpu_mxl(env) == MXL_RV32) {
1248 base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT;
1249 vm = get_field(env->satp, SATP32_MODE);
1250 } else {
1251 base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT;
1252 vm = get_field(env->satp, SATP64_MODE);
1253 }
1254 }
1255 widened = 0;
1256 } else {
1257 if (riscv_cpu_mxl(env) == MXL_RV32) {
1258 base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT;
1259 vm = get_field(env->hgatp, SATP32_MODE);
1260 } else {
1261 base = (hwaddr)get_field(env->hgatp, SATP64_PPN) << PGSHIFT;
1262 vm = get_field(env->hgatp, SATP64_MODE);
1263 }
1264 widened = 2;
1265 }
1266
1267 switch (vm) {
1268 case VM_1_10_SV32:
1269 levels = 2; ptidxbits = 10; ptesize = 4; break;
1270 case VM_1_10_SV39:
1271 levels = 3; ptidxbits = 9; ptesize = 8; break;
1272 case VM_1_10_SV48:
1273 levels = 4; ptidxbits = 9; ptesize = 8; break;
1274 case VM_1_10_SV57:
1275 levels = 5; ptidxbits = 9; ptesize = 8; break;
1276 case VM_1_10_MBARE:
1277 *physical = addr;
1278 *ret_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1279 return TRANSLATE_SUCCESS;
1280 default:
1281 g_assert_not_reached();
1282 }
1283
1284 CPUState *cs = env_cpu(env);
1285 int va_bits = PGSHIFT + levels * ptidxbits + widened;
1286 int sxlen = 16 << riscv_cpu_sxl(env);
1287 int sxlen_bytes = sxlen / 8;
1288
1289 if (first_stage == true) {
1290 target_ulong mask, masked_msbs;
1291
1292 if (sxlen > (va_bits - 1)) {
1293 mask = (1L << (sxlen - (va_bits - 1))) - 1;
1294 } else {
1295 mask = 0;
1296 }
1297 masked_msbs = (addr >> (va_bits - 1)) & mask;
1298
1299 if (masked_msbs != 0 && masked_msbs != mask) {
1300 return TRANSLATE_FAIL;
1301 }
1302 } else {
1303 if (vm != VM_1_10_SV32 && addr >> va_bits != 0) {
1304 return TRANSLATE_FAIL;
1305 }
1306 }
1307
1308 bool pbmte = env->menvcfg & MENVCFG_PBMTE;
1309 bool svade = riscv_cpu_cfg(env)->ext_svade;
1310 bool svadu = riscv_cpu_cfg(env)->ext_svadu;
1311 bool adue = svadu ? env->menvcfg & MENVCFG_ADUE : !svade;
1312 bool svrsw60t59b = riscv_cpu_cfg(env)->ext_svrsw60t59b;
1313
1314 if (first_stage && two_stage && env->virt_enabled) {
1315 pbmte = pbmte && (env->henvcfg & HENVCFG_PBMTE);
1316 adue = adue && (env->henvcfg & HENVCFG_ADUE);
1317 }
1318
1319 int ptshift = (levels - 1) * ptidxbits;
1320 target_ulong pte;
1321 hwaddr pte_addr;
1322 int i;
1323
1324 restart:
1325 for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
1326 target_ulong idx;
1327 if (i == 0) {
1328 idx = (addr >> (PGSHIFT + ptshift)) &
1329 ((1 << (ptidxbits + widened)) - 1);
1330 } else {
1331 idx = (addr >> (PGSHIFT + ptshift)) &
1332 ((1 << ptidxbits) - 1);
1333 }
1334
1335
1336
1337 if (two_stage && first_stage) {
1338 int vbase_prot;
1339 hwaddr vbase;
1340
1341
1342 int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
1343 base, NULL, MMU_DATA_LOAD,
1344 MMUIdx_U, false, true,
1345 is_debug, false);
1346
1347 if (vbase_ret != TRANSLATE_SUCCESS) {
1348 if (fault_pte_addr) {
1349 *fault_pte_addr = (base + idx * ptesize) >> 2;
1350 }
1351 return TRANSLATE_G_STAGE_FAIL;
1352 }
1353
1354 pte_addr = vbase + idx * ptesize;
1355 } else {
1356 pte_addr = base + idx * ptesize;
1357 }
1358
1359 int pmp_prot;
1360 int pmp_ret = get_physical_address_pmp(env, &pmp_prot, pte_addr,
1361 sxlen_bytes,
1362 MMU_DATA_LOAD, PRV_S);
1363 if (pmp_ret != TRANSLATE_SUCCESS) {
1364 return TRANSLATE_PMP_FAIL;
1365 }
1366
1367 if (riscv_cpu_mxl(env) == MXL_RV32) {
1368 pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
1369 } else {
1370 pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
1371 }
1372
1373 if (res != MEMTX_OK) {
1374 return TRANSLATE_FAIL;
1375 }
1376
1377 if (riscv_cpu_sxl(env) == MXL_RV32) {
1378 ppn = pte >> PTE_PPN_SHIFT;
1379 } else {
1380 if (pte & PTE_RESERVED(svrsw60t59b)) {
1381 qemu_log_mask(LOG_GUEST_ERROR, "%s: reserved bits set in PTE: "
1382 "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1383 __func__, pte_addr, pte);
1384 return TRANSLATE_FAIL;
1385 }
1386
1387 if (!pbmte && (pte & PTE_PBMT)) {
1388
1389 qemu_log_mask(LOG_GUEST_ERROR, "%s: PBMT bits set in PTE, "
1390 "and Svpbmt extension is disabled: "
1391 "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1392 __func__, pte_addr, pte);
1393 return TRANSLATE_FAIL;
1394 }
1395
1396 if (!riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
1397
1398 qemu_log_mask(LOG_GUEST_ERROR, "%s: N bit set in PTE, "
1399 "and Svnapot extension is disabled: "
1400 "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1401 __func__, pte_addr, pte);
1402 return TRANSLATE_FAIL;
1403 }
1404
1405 ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT;
1406 }
1407
1408 if (!(pte & PTE_V)) {
1409
1410 return TRANSLATE_FAIL;
1411 }
1412
1413 if (pte & (PTE_R | PTE_W | PTE_X)) {
1414 goto leaf;
1415 }
1416
1417 if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) {
1418
1419 qemu_log_mask(LOG_GUEST_ERROR, "%s: D, A, or U bits set in non-leaf PTE: "
1420 "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1421 __func__, pte_addr, pte);
1422 return TRANSLATE_FAIL;
1423 }
1424
1425 base = ppn << PGSHIFT;
1426 }
1427
1428
1429 return TRANSLATE_FAIL;
1430
1431 leaf:
1432 if (ppn & ((1ULL << ptshift) - 1)) {
1433
1434 qemu_log_mask(LOG_GUEST_ERROR, "%s: PPN bits in PTE is misaligned: "
1435 "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1436 __func__, pte_addr, pte);
1437 return TRANSLATE_FAIL;
1438 }
1439 if (!pbmte && (pte & PTE_PBMT)) {
1440
1441 qemu_log_mask(LOG_GUEST_ERROR, "%s: PBMT bits set in PTE, "
1442 "and Svpbmt extension is disabled: "
1443 "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
1444 __func__, pte_addr, pte);
1445 return TRANSLATE_FAIL;
1446 }
1447
1448 target_ulong rwx = pte & (PTE_R | PTE_W | PTE_X);
1449
1450 switch (rwx) {
1451 case PTE_W | PTE_X:
1452 return TRANSLATE_FAIL;
1453 case PTE_W:
1454
1455 if (cpu_get_bcfien(env) && first_stage) {
1456 sstack_page = true;
1457
1458
1459
1460
1461 rwx = is_sstack_idx ? (PTE_R | PTE_W) : (is_probe ? 0 : PTE_R);
1462 break;
1463 }
1464 return TRANSLATE_FAIL;
1465 case PTE_R:
1466
1467
1468
1469
1470
1471 if (is_sstack_idx) {
1472 return TRANSLATE_FAIL;
1473 }
1474 break;
1475 }
1476
1477 int prot = 0;
1478 if (rwx & PTE_R) {
1479 prot |= PAGE_READ;
1480 }
1481 if (rwx & PTE_W) {
1482 prot |= PAGE_WRITE;
1483 }
1484 if (rwx & PTE_X) {
1485 bool mxr = false;
1486
1487
1488
1489
1490
1491 if (first_stage || !env->virt_enabled) {
1492 mxr = get_field(env->mstatus, MSTATUS_MXR);
1493 }
1494
1495
1496 if (first_stage && two_stage && !env->virt_enabled) {
1497 mxr |= get_field(env->vsstatus, MSTATUS_MXR);
1498 }
1499
1500
1501
1502
1503
1504 if (env->virt_enabled) {
1505 mxr |= get_field(env->mstatus_hs, MSTATUS_MXR);
1506 }
1507
1508 if (mxr) {
1509 prot |= PAGE_READ;
1510 }
1511 prot |= PAGE_EXEC;
1512 }
1513
1514 if (pte & PTE_U) {
1515 if (mode != PRV_U) {
1516 if (!mmuidx_sum(mmu_idx)) {
1517 return TRANSLATE_FAIL;
1518 }
1519
1520 prot &= PAGE_READ | PAGE_WRITE;
1521 }
1522 } else if (mode != PRV_S) {
1523
1524 return TRANSLATE_FAIL;
1525 }
1526
1527 if (!((prot >> access_type) & 1)) {
1528
1529
1530
1531
1532 return sstack_page ? TRANSLATE_PMP_FAIL : TRANSLATE_FAIL;
1533 }
1534
1535 target_ulong updated_pte = pte;
1536
1537
1538
1539
1540
1541 if (adue) {
1542 updated_pte |= PTE_A | (access_type == MMU_DATA_STORE ? PTE_D : 0);
1543 } else if (!(pte & PTE_A) ||
1544 (access_type == MMU_DATA_STORE && !(pte & PTE_D))) {
1545 return TRANSLATE_FAIL;
1546 }
1547
1548
1549 if (updated_pte != pte && !is_debug) {
1550 if (!adue) {
1551 return TRANSLATE_FAIL;
1552 }
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562 MemoryRegion *mr;
1563 hwaddr l = sxlen_bytes, addr1;
1564 mr = address_space_translate(cs->as, pte_addr, &addr1, &l,
1565 false, MEMTXATTRS_UNSPECIFIED);
1566 if (memory_region_is_ram(mr)) {
1567 target_ulong *pte_pa = qemu_map_ram_ptr(mr->ram_block, addr1);
1568 target_ulong old_pte;
1569 if (riscv_cpu_sxl(env) == MXL_RV32) {
1570 old_pte = qatomic_cmpxchg((uint32_t *)pte_pa, cpu_to_le32(pte), cpu_to_le32(updated_pte));
1571 old_pte = le32_to_cpu(old_pte);
1572 } else {
1573 old_pte = qatomic_cmpxchg(pte_pa, cpu_to_le64(pte), cpu_to_le64(updated_pte));
1574 old_pte = le64_to_cpu(old_pte);
1575 }
1576 if (old_pte != pte) {
1577 goto restart;
1578 }
1579 pte = updated_pte;
1580 } else {
1581
1582
1583
1584
1585 return TRANSLATE_FAIL;
1586 }
1587 }
1588
1589
1590 target_ulong vpn = addr >> PGSHIFT;
1591
1592 if (riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
1593 napot_bits = ctzl(ppn) + 1;
1594 if ((i != (levels - 1)) || (napot_bits != 4)) {
1595 return TRANSLATE_FAIL;
1596 }
1597 }
1598
1599 napot_mask = (1 << napot_bits) - 1;
1600 *physical = (((ppn & ~napot_mask) | (vpn & napot_mask) |
1601 (vpn & (((target_ulong)1 << ptshift) - 1))
1602 ) << PGSHIFT) | (addr & ~TARGET_PAGE_MASK);
1603
1604
1605
1606
1607
1608
1609 if (access_type != MMU_DATA_STORE && !(pte & PTE_D)) {
1610 prot &= ~PAGE_WRITE;
1611 }
1612 *ret_prot = prot;
1613
1614 return TRANSLATE_SUCCESS;
1615}
1616
1617static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
1618 MMUAccessType access_type, bool pmp_violation,
1619 bool first_stage, bool two_stage,
1620 bool two_stage_indirect)
1621{
1622 CPUState *cs = env_cpu(env);
1623
1624 switch (access_type) {
1625 case MMU_INST_FETCH:
1626 if (pmp_violation) {
1627 cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
1628 } else if (env->virt_enabled && !first_stage) {
1629 cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
1630 } else {
1631 cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT;
1632 }
1633 break;
1634 case MMU_DATA_LOAD:
1635 if (pmp_violation) {
1636 cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
1637 } else if (two_stage && !first_stage) {
1638 cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
1639 } else {
1640 cs->exception_index = RISCV_EXCP_LOAD_PAGE_FAULT;
1641 }
1642 break;
1643 case MMU_DATA_STORE:
1644 if (pmp_violation) {
1645 cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
1646 } else if (two_stage && !first_stage) {
1647 cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
1648 } else {
1649 cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
1650 }
1651 break;
1652 default:
1653 g_assert_not_reached();
1654 }
1655 env->badaddr = address;
1656 env->two_stage_lookup = two_stage;
1657 env->two_stage_indirect_lookup = two_stage_indirect;
1658}
1659
1660hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
1661{
1662 RISCVCPU *cpu = RISCV_CPU(cs);
1663 CPURISCVState *env = &cpu->env;
1664 hwaddr phys_addr;
1665 int prot;
1666 int mmu_idx = riscv_env_mmu_index(&cpu->env, false);
1667
1668 if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
1669 true, env->virt_enabled, true, false)) {
1670 return -1;
1671 }
1672
1673 if (env->virt_enabled) {
1674 if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
1675 0, MMUIdx_U, false, true, true, false)) {
1676 return -1;
1677 }
1678 }
1679
1680 return phys_addr & TARGET_PAGE_MASK;
1681}
1682
1683void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
1684 vaddr addr, unsigned size,
1685 MMUAccessType access_type,
1686 int mmu_idx, MemTxAttrs attrs,
1687 MemTxResult response, uintptr_t retaddr)
1688{
1689 RISCVCPU *cpu = RISCV_CPU(cs);
1690 CPURISCVState *env = &cpu->env;
1691
1692 if (access_type == MMU_DATA_STORE) {
1693 cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
1694 } else if (access_type == MMU_DATA_LOAD) {
1695 cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
1696 } else {
1697 cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
1698 }
1699
1700 env->badaddr = addr;
1701 env->two_stage_lookup = mmuidx_2stage(mmu_idx);
1702 env->two_stage_indirect_lookup = false;
1703 cpu_loop_exit_restore(cs, retaddr);
1704}
1705
1706void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
1707 MMUAccessType access_type, int mmu_idx,
1708 uintptr_t retaddr)
1709{
1710 RISCVCPU *cpu = RISCV_CPU(cs);
1711 CPURISCVState *env = &cpu->env;
1712 switch (access_type) {
1713 case MMU_INST_FETCH:
1714 cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
1715 break;
1716 case MMU_DATA_LOAD:
1717 cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
1718
1719 if (mmu_idx & MMU_IDX_SS_WRITE) {
1720 cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
1721 }
1722 break;
1723 case MMU_DATA_STORE:
1724 cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
1725
1726 if (mmu_idx & MMU_IDX_SS_WRITE) {
1727 cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
1728 }
1729 break;
1730 default:
1731 g_assert_not_reached();
1732 }
1733 env->badaddr = addr;
1734 env->two_stage_lookup = mmuidx_2stage(mmu_idx);
1735 env->two_stage_indirect_lookup = false;
1736 cpu_loop_exit_restore(cs, retaddr);
1737}
1738
1739
1740static void pmu_tlb_fill_incr_ctr(RISCVCPU *cpu, MMUAccessType access_type)
1741{
1742 enum riscv_pmu_event_idx pmu_event_type;
1743
1744 switch (access_type) {
1745 case MMU_INST_FETCH:
1746 pmu_event_type = RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS;
1747 break;
1748 case MMU_DATA_LOAD:
1749 pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS;
1750 break;
1751 case MMU_DATA_STORE:
1752 pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS;
1753 break;
1754 default:
1755 return;
1756 }
1757
1758 riscv_pmu_incr_ctr(cpu, pmu_event_type);
1759}
1760
/*
 * TCG TLB-fill hook: translate 'address' for 'access_type' under 'mmu_idx',
 * apply PMP checks, and either install a TLB entry or raise the
 * appropriate fault.  Returns true on success; returns false only when
 * 'probe' is set and the translation failed.
 */
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    vaddr im_address;
    hwaddr pa = 0;
    int prot, prot2, prot_pmp;
    bool pmp_violation = false;
    bool first_stage_error = true;
    bool two_stage_lookup = mmuidx_2stage(mmu_idx);
    bool two_stage_indirect_error = false;
    int ret = TRANSLATE_FAIL;
    int mode = mmuidx_priv(mmu_idx);
    /* Default TLB page size; PMP may shrink it below. */
    hwaddr tlb_size = TARGET_PAGE_SIZE;

    env->guest_phys_fault_addr = 0;

    qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    pmu_tlb_fill_incr_ctr(cpu, access_type);
    if (two_stage_lookup) {
        /* Two-stage lookup: VS-stage first, then G-stage. */
        ret = get_physical_address(env, &pa, &prot, address,
                                   &env->guest_phys_fault_addr, access_type,
                                   mmu_idx, true, true, false, probe);

        /*
         * A G-stage fault raised while walking the VS-stage page table is
         * reported as TRANSLATE_G_STAGE_FAIL; guest_phys_fault_addr was
         * already filled in by get_physical_address() in that case.
         */
        if (ret == TRANSLATE_G_STAGE_FAIL) {
            first_stage_error = false;
            two_stage_indirect_error = true;
        }

        qemu_log_mask(CPU_LOG_MMU,
                      "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
                      HWADDR_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            /* Second stage: translate the guest-physical address. */
            im_address = pa;

            ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
                                       access_type, MMUIdx_U, false, true,
                                       false, probe);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s 2nd-stage address=%" VADDR_PRIx
                          " ret %d physical "
                          HWADDR_FMT_plx " prot %d\n",
                          __func__, im_address, ret, pa, prot2);

            /* Effective permissions are the intersection of both stages. */
            prot &= prot2;

            if (ret == TRANSLATE_SUCCESS) {
                ret = get_physical_address_pmp(env, &prot_pmp, pa,
                                               size, access_type, mode);
                tlb_size = pmp_get_tlb_size(env, pa);

                qemu_log_mask(CPU_LOG_MMU,
                              "%s PMP address=" HWADDR_FMT_plx " ret %d prot"
                              " %d tlb_size %" HWADDR_PRIu "\n",
                              __func__, pa, ret, prot_pmp, tlb_size);

                prot &= prot_pmp;
            } else {
                /*
                 * G-stage failure: record the guest-physical fault address
                 * (right-shifted by 2, as architecturally required for
                 * htval/mtval2) unless this was a PMP failure.
                 */
                first_stage_error = false;
                if (ret != TRANSLATE_PMP_FAIL) {
                    env->guest_phys_fault_addr = (im_address |
                                                  (address &
                                                   (TARGET_PAGE_SIZE - 1))) >> 2;
                }
            }
        }
    } else {
        /* Single-stage lookup. */
        ret = get_physical_address(env, &pa, &prot, address, NULL,
                                   access_type, mmu_idx, true, false, false,
                                   probe);

        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d physical "
                      HWADDR_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            ret = get_physical_address_pmp(env, &prot_pmp, pa,
                                           size, access_type, mode);
            tlb_size = pmp_get_tlb_size(env, pa);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s PMP address=" HWADDR_FMT_plx " ret %d prot"
                          " %d tlb_size %" HWADDR_PRIu "\n",
                          __func__, pa, ret, prot_pmp, tlb_size);

            prot &= prot_pmp;
        }
    }

    if (ret == TRANSLATE_PMP_FAIL) {
        pmp_violation = true;
    }

    if (ret == TRANSLATE_SUCCESS) {
        /* Install the (possibly PMP-shrunk) TLB entry and succeed. */
        tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
                     prot, mmu_idx, tlb_size);
        return true;
    } else if (probe) {
        return false;
    } else {
        int wp_access = 0;

        if (access_type == MMU_DATA_LOAD) {
            wp_access |= BP_MEM_READ;
        } else if (access_type == MMU_DATA_STORE) {
            wp_access |= BP_MEM_WRITE;
        }

        /*
         * Check watchpoints before raising the MMU fault so that a
         * debug exception on this address takes precedence over the
         * translation failure.
         */
        cpu_check_watchpoint(cs, address, size, MEMTXATTRS_UNSPECIFIED,
                             wp_access, retaddr);

        raise_mmu_exception(env, address, access_type, pmp_violation,
                            first_stage_error, two_stage_lookup,
                            two_stage_indirect_error);
        cpu_loop_exit_restore(cs, retaddr);
    }

    return true;
}
1907
1908static target_ulong riscv_transformed_insn(CPURISCVState *env,
1909 target_ulong insn,
1910 target_ulong taddr)
1911{
1912 target_ulong xinsn = 0;
1913 target_ulong access_rs1 = 0, access_imm = 0, access_size = 0;
1914
1915
1916
1917
1918
1919
1920
1921
1922 if ((insn & 0x3) != 0x3) {
1923
1924 switch (GET_C_OP(insn)) {
1925 case OPC_RISC_C_OP_QUAD0:
1926 switch (GET_C_FUNC(insn)) {
1927 case OPC_RISC_C_FUNC_FLD_LQ:
1928 if (riscv_cpu_xlen(env) != 128) {
1929 xinsn = OPC_RISC_FLD;
1930 xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
1931 access_rs1 = GET_C_RS1S(insn);
1932 access_imm = GET_C_LD_IMM(insn);
1933 access_size = 8;
1934 }
1935 break;
1936 case OPC_RISC_C_FUNC_LW:
1937 xinsn = OPC_RISC_LW;
1938 xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
1939 access_rs1 = GET_C_RS1S(insn);
1940 access_imm = GET_C_LW_IMM(insn);
1941 access_size = 4;
1942 break;
1943 case OPC_RISC_C_FUNC_FLW_LD:
1944 if (riscv_cpu_xlen(env) == 32) {
1945 xinsn = OPC_RISC_FLW;
1946 xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
1947 access_rs1 = GET_C_RS1S(insn);
1948 access_imm = GET_C_LW_IMM(insn);
1949 access_size = 4;
1950 } else {
1951 xinsn = OPC_RISC_LD;
1952 xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
1953 access_rs1 = GET_C_RS1S(insn);
1954 access_imm = GET_C_LD_IMM(insn);
1955 access_size = 8;
1956 }
1957 break;
1958 case OPC_RISC_C_FUNC_FSD_SQ:
1959 if (riscv_cpu_xlen(env) != 128) {
1960 xinsn = OPC_RISC_FSD;
1961 xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
1962 access_rs1 = GET_C_RS1S(insn);
1963 access_imm = GET_C_SD_IMM(insn);
1964 access_size = 8;
1965 }
1966 break;
1967 case OPC_RISC_C_FUNC_SW:
1968 xinsn = OPC_RISC_SW;
1969 xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
1970 access_rs1 = GET_C_RS1S(insn);
1971 access_imm = GET_C_SW_IMM(insn);
1972 access_size = 4;
1973 break;
1974 case OPC_RISC_C_FUNC_FSW_SD:
1975 if (riscv_cpu_xlen(env) == 32) {
1976 xinsn = OPC_RISC_FSW;
1977 xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
1978 access_rs1 = GET_C_RS1S(insn);
1979 access_imm = GET_C_SW_IMM(insn);
1980 access_size = 4;
1981 } else {
1982 xinsn = OPC_RISC_SD;
1983 xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
1984 access_rs1 = GET_C_RS1S(insn);
1985 access_imm = GET_C_SD_IMM(insn);
1986 access_size = 8;
1987 }
1988 break;
1989 default:
1990 break;
1991 }
1992 break;
1993 case OPC_RISC_C_OP_QUAD2:
1994 switch (GET_C_FUNC(insn)) {
1995 case OPC_RISC_C_FUNC_FLDSP_LQSP:
1996 if (riscv_cpu_xlen(env) != 128) {
1997 xinsn = OPC_RISC_FLD;
1998 xinsn = SET_RD(xinsn, GET_C_RD(insn));
1999 access_rs1 = 2;
2000 access_imm = GET_C_LDSP_IMM(insn);
2001 access_size = 8;
2002 }
2003 break;
2004 case OPC_RISC_C_FUNC_LWSP:
2005 xinsn = OPC_RISC_LW;
2006 xinsn = SET_RD(xinsn, GET_C_RD(insn));
2007 access_rs1 = 2;
2008 access_imm = GET_C_LWSP_IMM(insn);
2009 access_size = 4;
2010 break;
2011 case OPC_RISC_C_FUNC_FLWSP_LDSP:
2012 if (riscv_cpu_xlen(env) == 32) {
2013 xinsn = OPC_RISC_FLW;
2014 xinsn = SET_RD(xinsn, GET_C_RD(insn));
2015 access_rs1 = 2;
2016 access_imm = GET_C_LWSP_IMM(insn);
2017 access_size = 4;
2018 } else {
2019 xinsn = OPC_RISC_LD;
2020 xinsn = SET_RD(xinsn, GET_C_RD(insn));
2021 access_rs1 = 2;
2022 access_imm = GET_C_LDSP_IMM(insn);
2023 access_size = 8;
2024 }
2025 break;
2026 case OPC_RISC_C_FUNC_FSDSP_SQSP:
2027 if (riscv_cpu_xlen(env) != 128) {
2028 xinsn = OPC_RISC_FSD;
2029 xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
2030 access_rs1 = 2;
2031 access_imm = GET_C_SDSP_IMM(insn);
2032 access_size = 8;
2033 }
2034 break;
2035 case OPC_RISC_C_FUNC_SWSP:
2036 xinsn = OPC_RISC_SW;
2037 xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
2038 access_rs1 = 2;
2039 access_imm = GET_C_SWSP_IMM(insn);
2040 access_size = 4;
2041 break;
2042 case 7:
2043 if (riscv_cpu_xlen(env) == 32) {
2044 xinsn = OPC_RISC_FSW;
2045 xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
2046 access_rs1 = 2;
2047 access_imm = GET_C_SWSP_IMM(insn);
2048 access_size = 4;
2049 } else {
2050 xinsn = OPC_RISC_SD;
2051 xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
2052 access_rs1 = 2;
2053 access_imm = GET_C_SDSP_IMM(insn);
2054 access_size = 8;
2055 }
2056 break;
2057 default:
2058 break;
2059 }
2060 break;
2061 default:
2062 break;
2063 }
2064
2065
2066
2067
2068
2069 xinsn &= ~((target_ulong)0x2);
2070 } else {
2071
2072 switch (MASK_OP_MAJOR(insn)) {
2073 case OPC_RISC_ATOMIC:
2074 xinsn = insn;
2075 access_rs1 = GET_RS1(insn);
2076 access_size = 1 << GET_FUNCT3(insn);
2077 break;
2078 case OPC_RISC_LOAD:
2079 case OPC_RISC_FP_LOAD:
2080 xinsn = SET_I_IMM(insn, 0);
2081 access_rs1 = GET_RS1(insn);
2082 access_imm = GET_IMM(insn);
2083 access_size = 1 << GET_FUNCT3(insn);
2084 break;
2085 case OPC_RISC_STORE:
2086 case OPC_RISC_FP_STORE:
2087 xinsn = SET_S_IMM(insn, 0);
2088 access_rs1 = GET_RS1(insn);
2089 access_imm = GET_STORE_IMM(insn);
2090 access_size = 1 << GET_FUNCT3(insn);
2091 break;
2092 case OPC_RISC_SYSTEM:
2093 if (MASK_OP_SYSTEM(insn) == OPC_RISC_HLVHSV) {
2094 xinsn = insn;
2095 access_rs1 = GET_RS1(insn);
2096 access_size = 1 << ((GET_FUNCT7(insn) >> 1) & 0x3);
2097 access_size = 1 << access_size;
2098 }
2099 break;
2100 default:
2101 break;
2102 }
2103 }
2104
2105 if (access_size) {
2106 xinsn = SET_RS1(xinsn, (taddr - (env->gpr[access_rs1] + access_imm)) &
2107 (access_size - 1));
2108 }
2109
2110 return xinsn;
2111}
2112
2113static target_ulong promote_load_fault(target_ulong orig_cause)
2114{
2115 switch (orig_cause) {
2116 case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
2117 return RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
2118
2119 case RISCV_EXCP_LOAD_ACCESS_FAULT:
2120 return RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
2121
2122 case RISCV_EXCP_LOAD_PAGE_FAULT:
2123 return RISCV_EXCP_STORE_PAGE_FAULT;
2124 }
2125
2126
2127 return orig_cause;
2128}
2129
2130static void riscv_do_nmi(CPURISCVState *env, target_ulong cause, bool virt)
2131{
2132 env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
2133 env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPV, virt);
2134 env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPP, env->priv);
2135 env->mncause = cause;
2136 env->mnepc = env->pc;
2137 env->pc = env->rnmi_irqvec;
2138
2139 if (cpu_get_fcfien(env)) {
2140 env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP, env->elp);
2141 }
2142
2143
2144 riscv_cpu_set_mode(env, PRV_M, false);
2145}
2146
2147
2148
2149
2150
2151
2152
/*
 * Deliver the pending exception or interrupt recorded in
 * cs->exception_index: select the target privilege mode (M, HS or VS)
 * based on the delegation CSRs, save the trap state into the
 * corresponding CSRs, and redirect the PC to the trap vector.
 */
void riscv_cpu_do_interrupt(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    bool virt = env->virt_enabled;
    bool write_gva = false;
    /* Unwind data may request that a load fault be reported as store/AMO. */
    bool always_storeamo = (env->excp_uw2 & RISCV_UW2_ALWAYS_STORE_AMO);
    bool vsmode_exc;
    uint64_t s;
    int mode;

    /*
     * cs->exception_index carries the interrupt flag in RISCV_EXCP_INT_FLAG;
     * strip it to obtain the architectural cause number.
     */
    bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
    target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
    uint64_t deleg = async ? env->mideleg : env->medeleg;
    /* Interrupts injected via mvien/hvien count as delegated even if mip
     * does not have the bit set. */
    bool s_injected = env->mvip & (1ULL << cause) & env->mvien &&
                      !(env->mip & (1ULL << cause));
    bool vs_injected = env->hvip & (1ULL << cause) & env->hvien &&
                       !(env->mip & (1ULL << cause));
    bool smode_double_trap = false;
    uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
    const bool prev_virt = env->virt_enabled;
    const target_ulong prev_priv = env->priv;
    target_ulong tval = 0;
    target_ulong tinst = 0;
    target_ulong htval = 0;
    target_ulong mtval2 = 0;
    target_ulong src;
    int sxlen = 0;
    int mxlen = 16 << riscv_cpu_mxl(env);
    bool nnmi_excep = false;

    /* A pending resumable NMI (Smrnmi) takes precedence over everything. */
    if (cpu->cfg.ext_smrnmi && env->rnmip && async) {
        riscv_do_nmi(env, cause | ((target_ulong)1U << (mxlen - 1)),
                     env->virt_enabled);
        return;
    }

    if (!async) {
        /* Set tval (and tinst) for traps that carry address information. */
        switch (cause) {
#ifdef CONFIG_TCG
        case RISCV_EXCP_SEMIHOST:
            /* Semihosting call: emulate and skip the ebreak (4 bytes). */
            do_common_semihosting(cs);
            env->pc += 4;
            return;
#endif
        case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
        case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_ADDR_MIS:
        case RISCV_EXCP_STORE_AMO_ADDR_MIS:
        case RISCV_EXCP_LOAD_ACCESS_FAULT:
        case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_PAGE_FAULT:
        case RISCV_EXCP_STORE_PAGE_FAULT:
            if (always_storeamo) {
                cause = promote_load_fault(cause);
            }
            write_gva = env->two_stage_lookup;
            tval = env->badaddr;
            if (env->two_stage_indirect_lookup) {
                /*
                 * Guest page fault during an implicit VS-stage page-table
                 * access: report the "pseudoinstruction" value (0x2000 /
                 * 0x3000 for 32/64-bit guests) instead of a transformed
                 * instruction.
                 */
                tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000;
            } else {
                /*
                 * Explicit guest memory access: report the transformed
                 * instruction derived from the trapping encoding.
                 */
                tinst = riscv_transformed_insn(env, env->bins, tval);
            }
            break;
        case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
        case RISCV_EXCP_INST_ADDR_MIS:
        case RISCV_EXCP_INST_ACCESS_FAULT:
        case RISCV_EXCP_INST_PAGE_FAULT:
            write_gva = env->two_stage_lookup;
            tval = env->badaddr;
            if (env->two_stage_indirect_lookup) {
                /* Same pseudoinstruction encoding as the data case above. */
                tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000;
            }
            break;
        case RISCV_EXCP_ILLEGAL_INST:
        case RISCV_EXCP_VIRT_INSTRUCTION_FAULT:
            /* tval holds the faulting instruction encoding. */
            tval = env->bins;
            break;
        case RISCV_EXCP_BREAKPOINT:
            tval = env->badaddr;
            if (cs->watchpoint_hit) {
                /* Report the exact watchpoint hit address, then clear it. */
                tval = cs->watchpoint_hit->hitaddr;
                cs->watchpoint_hit = NULL;
            }
            break;
        case RISCV_EXCP_SW_CHECK:
            tval = env->sw_check_code;
            break;
        default:
            break;
        }

        /* Map an ECALL cause onto the current privilege level. */
        if (cause == RISCV_EXCP_U_ECALL) {
            assert(env->priv <= 3);

            if (env->priv == PRV_M) {
                cause = RISCV_EXCP_M_ECALL;
            } else if (env->priv == PRV_S && env->virt_enabled) {
                cause = RISCV_EXCP_VS_ECALL;
            } else if (env->priv == PRV_S && !env->virt_enabled) {
                cause = RISCV_EXCP_S_ECALL;
            } else if (env->priv == PRV_U) {
                cause = RISCV_EXCP_U_ECALL;
            }
        }
    }

    trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
                     riscv_cpu_get_trap_name(cause, async));

    qemu_log_mask(CPU_LOG_INT,
                  "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
                  "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
                  __func__, env->mhartid, async, cause, env->pc, tval,
                  riscv_cpu_get_trap_name(cause, async));

    /*
     * Target mode: S if we trapped from U/S and the cause is delegated
     * (or injected via mvien/hvien), otherwise M.
     */
    mode = env->priv <= PRV_S && cause < 64 &&
           (((deleg >> cause) & 1) || s_injected || vs_injected) ? PRV_S : PRV_M;

    /* Further delegated to VS mode if running with V=1 and hdeleg says so. */
    vsmode_exc = env->virt_enabled && cause < 64 &&
                 (((hdeleg >> cause) & 1) || vs_injected);

    /*
     * Ssdbltrp: if S-mode double-trap detection is armed (DTE set and the
     * stack-disable bit SDT already set), escalate the trap to M-mode.
     */
    if (cpu->cfg.ext_ssdbltrp && env->priv == PRV_S && mode == PRV_S) {
        bool dte = (env->menvcfg & MENVCFG_DTE) != 0;
        bool sdt = (env->mstatus & MSTATUS_SDT) != 0;

        if (riscv_has_ext(env, RVH)) {
            if (vsmode_exc) {
                /* VS-mode target: DTE comes from henvcfg instead. */
                dte = (env->henvcfg & HENVCFG_DTE) != 0;
            } else if (env->virt_enabled) {
                /* Trapping to HS from V=1: SDT comes from the HS copy. */
                sdt = (env->mstatus_hs & MSTATUS_SDT) != 0;
            }
        }
        smode_double_trap = dte && sdt;
        if (smode_double_trap) {
            mode = PRV_M;
        }
    }

    if (mode == PRV_S) {
        /* Trap taken in (H)S or VS mode. */

        /* Zicfilp: stash the expected-landing-pad state in SPELP. */
        if (cpu_get_fcfien(env)) {
            env->mstatus = set_field(env->mstatus, MSTATUS_SPELP, env->elp);
        }

        if (riscv_has_ext(env, RVH)) {
            if (vsmode_exc) {
                /*
                 * Trap to VS mode.  VS-level interrupt numbers are one
                 * above their HS-level counterparts, so convert e.g.
                 * IRQ_VS_TIMER into the value VS software expects.
                 */
                if (async && (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
                              cause == IRQ_VS_EXT)) {
                    cause = cause - 1;
                }
                write_gva = false;
            } else if (env->virt_enabled) {
                /* Trap into HS mode, from virt: swap in the HS CSR bank. */
                riscv_cpu_swap_hypervisor_regs(env);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
                                         env->priv);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV, true);

                htval = env->guest_phys_fault_addr;

                virt = false;
            } else {
                /* Trap into HS mode from HS/U with V=0. */
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
                htval = env->guest_phys_fault_addr;
            }
            env->hstatus = set_field(env->hstatus, HSTATUS_GVA, write_gva);
        }

        /* Save interrupt-enable/privilege state into the S-level fields. */
        s = env->mstatus;
        s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
        s = set_field(s, MSTATUS_SPP, env->priv);
        s = set_field(s, MSTATUS_SIE, 0);
        if (riscv_env_smode_dbltrp_enabled(env, virt)) {
            /* Arm S-mode double-trap detection for the handler. */
            s = set_field(s, MSTATUS_SDT, 1);
        }
        env->mstatus = s;
        sxlen = 16 << riscv_cpu_sxl(env);
        env->scause = cause | ((target_ulong)async << (sxlen - 1));
        env->sepc = env->pc;
        env->stval = tval;
        env->htval = htval;
        env->htinst = tinst;
        /* Vectored mode (stvec mode bits == 1) only applies to interrupts. */
        env->pc = (env->stvec >> 2 << 2) +
                  ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_S, virt);

        src = env->sepc;
    } else {
        /*
         * Trap taken in M mode.  If an NMI is already in progress
         * (mnstatus.NMIE clear) a synchronous exception here must use
         * the RNMI exception vector instead of mtvec.
         */
        nnmi_excep = cpu->cfg.ext_smrnmi &&
                     !get_field(env->mnstatus, MNSTATUS_NMIE) &&
                     !async;

        /* Zicfilp: stash the landing-pad state in the appropriate CSR. */
        if (cpu_get_fcfien(env)) {
            if (nnmi_excep) {
                env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP,
                                          env->elp);
            } else {
                env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, env->elp);
            }
        }

        if (riscv_has_ext(env, RVH)) {
            if (env->virt_enabled) {
                riscv_cpu_swap_hypervisor_regs(env);
            }
            env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
                                     env->virt_enabled);
            if (env->virt_enabled && tval) {
                env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1);
            }

            mtval2 = env->guest_phys_fault_addr;

            /* M-mode always runs with virtualization disabled. */
            virt = false;
        }

        /*
         * Recompute after the potential CSR swaps above; the condition
         * itself only depends on mnstatus and async.
         */
        nnmi_excep = cpu->cfg.ext_smrnmi &&
                     !get_field(env->mnstatus, MNSTATUS_NMIE) &&
                     !async;

        s = env->mstatus;
        s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
        s = set_field(s, MSTATUS_MPP, env->priv);
        s = set_field(s, MSTATUS_MIE, 0);
        if (cpu->cfg.ext_smdbltrp) {
            if (env->mstatus & MSTATUS_MDT) {
                /* M-mode double trap: fatal unless RNMI can absorb it. */
                assert(env->priv == PRV_M);
                if (!cpu->cfg.ext_smrnmi || nnmi_excep) {
                    cpu_abort(CPU(cpu), "M-mode double trap\n");
                } else {
                    riscv_do_nmi(env, cause, false);
                    return;
                }
            }

            /* Arm M-mode double-trap detection for the handler. */
            s = set_field(s, MSTATUS_MDT, 1);
        }
        env->mstatus = s;
        env->mcause = cause | ((target_ulong)async << (mxlen - 1));
        if (smode_double_trap) {
            /* Escalated S-mode double trap: original cause goes to mtval2. */
            env->mtval2 = env->mcause;
            env->mcause = RISCV_EXCP_DOUBLE_TRAP;
        } else {
            env->mtval2 = mtval2;
        }
        env->mepc = env->pc;
        env->mtval = tval;
        env->mtinst = tinst;

        /*
         * Exceptions taken while an NMI is in progress vector through the
         * RNMI exception vector; otherwise use mtvec (vectored only for
         * interrupts when mtvec mode bits == 1).
         */
        if (nnmi_excep) {
            env->pc = env->rnmi_excpvec;
        } else {
            env->pc = (env->mtvec >> 2 << 2) +
                      ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
        }
        riscv_cpu_set_mode(env, PRV_M, virt);
        src = env->mepc;
    }

    /* Smctr/Ssctr: record the trap as a control-transfer record. */
    if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) {
        if (async && cause == IRQ_PMU_OVF) {
            riscv_ctr_freeze(env, XCTRCTL_LCOFIFRZ, virt);
        } else if (!async && cause == RISCV_EXCP_BREAKPOINT) {
            riscv_ctr_freeze(env, XCTRCTL_BPFRZ, virt);
        }

        riscv_ctr_add_entry(env, src, env->pc,
                            async ? CTRDATA_TYPE_INTERRUPT : CTRDATA_TYPE_EXCEPTION,
                            prev_priv, prev_virt);
    }

    /*
     * Entering a trap handler clears the expected-landing-pad state; the
     * previous value was saved into SPELP/MPELP/MNPELP above.
     */
    env->elp = false;

    /*
     * Reset the two-stage-lookup bookkeeping so stale values are not
     * reused by the next trap.
     */
    env->two_stage_lookup = false;
    env->two_stage_indirect_lookup = false;
}
2487
2488#endif
2489