/*
 * RISC-V CPU helpers for qemu.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "trace.h"

int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return 0;
#else
    return env->priv;
#endif
}

#ifndef CONFIG_USER_ONLY
/*
 * Return the lowest-numbered local interrupt that is both pending in mip and
 * enabled in mie, honouring mstatus.MIE/SIE and mideleg for the current
 * privilege level, or EXCP_NONE if no such interrupt exists.
 */
static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
    target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE);
    target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE);
    target_ulong pending = atomic_read(&env->mip) & env->mie;
    target_ulong mie = env->priv < PRV_M || (env->priv == PRV_M && mstatus_mie);
    target_ulong sie = env->priv < PRV_S || (env->priv == PRV_S && mstatus_sie);
    target_ulong irqs = (pending & ~env->mideleg & -mie) |
                        (pending & env->mideleg & -sie);

    if (irqs) {
        return ctz64(irqs);
    } else {
        return EXCP_NONE;
    }
}
#endif

bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        int interruptno = riscv_cpu_local_irq_pending(env);
        if (interruptno >= 0) {
            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
            riscv_cpu_do_interrupt(cs);
            return true;
        }
    }
#endif
    return false;
}

#if !defined(CONFIG_USER_ONLY)

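/*
 * Claim the given mip interrupt bits on behalf of a device model. Returns 0
 * on success, or -1 if any of the requested bits have already been claimed.
 */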
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
{
    CPURISCVState *env = &cpu->env;
    if (env->miclaim & interrupts) {
        return -1;
    } else {
        env->miclaim |= interrupts;
        return 0;
    }
}

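/*
 * Atomically update the bits of mip selected by @mask to @value, assert or
 * deassert the hard interrupt line accordingly, and return the previous mip.
 */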
uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
{
    CPURISCVState *env = &cpu->env;
    uint32_t old, new, cmp = atomic_read(&env->mip);

    do {
        old = cmp;
        new = (old & ~mask) | (value & mask);
        cmp = atomic_cmpxchg(&env->mip, old, new);
    } while (old != cmp);

    if (new) {
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
    }

    return old;
}

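/*
 * Switch to the given privilege level. PRV_H is not implemented and is
 * mapped to PRV_U. No TLB flush is required because the privilege level is
 * part of the MMU index (see riscv_cpu_mmu_index).
 */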
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
{
    if (newpriv > PRV_M) {
        g_assert_not_reached();
    }
    if (newpriv == PRV_H) {
        newpriv = PRV_U;
    }
    env->priv = newpriv;
}

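/* get_physical_address - translate a virtual address to a physical address
 *
 * Either pass the address through unchanged (M-mode or bare translation) or
 * walk the page table. On success *physical and *prot are filled in and
 * TRANSLATE_SUCCESS is returned; otherwise TRANSLATE_FAIL.
 *
 * @env: CPURISCVState
 * @physical: set to the translated physical address
 * @prot: set to the page protection flags for the resulting mapping
 * @addr: the virtual address to translate
 * @access_type: MMU_INST_FETCH, MMU_DATA_LOAD or MMU_DATA_STORE
 * @mmu_idx: the privilege level at which the access is performed
 */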
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
                                int *prot, target_ulong addr,
                                int access_type, int mmu_idx)
{
    /*
     * The effective privilege level is normally mmu_idx; however, data
     * accesses from M-mode honour mstatus.MPRV and are performed at the
     * privilege level saved in mstatus.MPP.
     */
    int mode = mmu_idx;

    if (mode == PRV_M && access_type != MMU_INST_FETCH) {
        if (get_field(env->mstatus, MSTATUS_MPRV)) {
            mode = get_field(env->mstatus, MSTATUS_MPP);
        }
    }

    /* M-mode accesses and harts without an MMU are not translated */
    if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    *prot = 0;

    target_ulong base;
    int levels, ptidxbits, ptesize, vm, sum;
    int mxr = get_field(env->mstatus, MSTATUS_MXR);

    /* determine the page-table geometry from satp (priv spec >= 1.10) or
     * from mstatus.VM and sptbr (priv spec 1.9.1) */
    if (env->priv_ver >= PRIV_VERSION_1_10_0) {
        base = get_field(env->satp, SATP_PPN) << PGSHIFT;
        sum = get_field(env->mstatus, MSTATUS_SUM);
        vm = get_field(env->satp, SATP_MODE);
        switch (vm) {
        case VM_1_10_SV32:
            levels = 2; ptidxbits = 10; ptesize = 4; break;
        case VM_1_10_SV39:
            levels = 3; ptidxbits = 9; ptesize = 8; break;
        case VM_1_10_SV48:
            levels = 4; ptidxbits = 9; ptesize = 8; break;
        case VM_1_10_SV57:
            levels = 5; ptidxbits = 9; ptesize = 8; break;
        case VM_1_10_MBARE:
            *physical = addr;
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return TRANSLATE_SUCCESS;
        default:
            g_assert_not_reached();
        }
    } else {
        base = env->sptbr << PGSHIFT;
        sum = !get_field(env->mstatus, MSTATUS_PUM);
        vm = get_field(env->mstatus, MSTATUS_VM);
        switch (vm) {
        case VM_1_09_SV32:
            levels = 2; ptidxbits = 10; ptesize = 4; break;
        case VM_1_09_SV39:
            levels = 3; ptidxbits = 9; ptesize = 8; break;
        case VM_1_09_SV48:
            levels = 4; ptidxbits = 9; ptesize = 8; break;
        case VM_1_09_MBARE:
            *physical = addr;
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return TRANSLATE_SUCCESS;
        default:
            g_assert_not_reached();
        }
    }

    CPUState *cs = CPU(riscv_env_get_cpu(env));
    int va_bits = PGSHIFT + levels * ptidxbits;
    target_ulong mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
    target_ulong masked_msbs = (addr >> (va_bits - 1)) & mask;
    /* the bits above the virtual address space must be a sign extension of
     * bit va_bits - 1, otherwise the virtual address is invalid */
    if (masked_msbs != 0 && masked_msbs != mask) {
        return TRANSLATE_FAIL;
    }

    int ptshift = (levels - 1) * ptidxbits;
    int i;

#if !TCG_OVERSIZED_GUEST
restart:
#endif
    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
        target_ulong idx = (addr >> (PGSHIFT + ptshift)) &
                           ((1 << ptidxbits) - 1);

        /* address of the page-table entry for this level */
        target_ulong pte_addr = base + idx * ptesize;
#if defined(TARGET_RISCV32)
        target_ulong pte = ldl_phys(cs->as, pte_addr);
#elif defined(TARGET_RISCV64)
        target_ulong pte = ldq_phys(cs->as, pte_addr);
#endif
        target_ulong ppn = pte >> PTE_PPN_SHIFT;

        if (!(pte & PTE_V)) {
            /* Invalid PTE */
            return TRANSLATE_FAIL;
        } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
            /* Inner PTE, continue walking */
            base = ppn << PGSHIFT;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
            /* Reserved leaf PTE flags: PTE_W */
            return TRANSLATE_FAIL;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
            /* Reserved leaf PTE flags: PTE_W + PTE_X */
            return TRANSLATE_FAIL;
        } else if ((pte & PTE_U) && ((mode != PRV_U) &&
                   (!sum || access_type == MMU_INST_FETCH))) {
            /* User PTE flags when not U mode and mstatus.SUM is not set,
             * or the access type is an instruction fetch */
            return TRANSLATE_FAIL;
        } else if (!(pte & PTE_U) && (mode != PRV_S)) {
            /* Supervisor PTE flags when not S mode */
            return TRANSLATE_FAIL;
        } else if (ppn & ((1ULL << ptshift) - 1)) {
            /* Misaligned PPN */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
                   ((pte & PTE_X) && mxr))) {
            /* Read access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
            /* Write access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
            /* Fetch access check failed */
            return TRANSLATE_FAIL;
        } else {
            /* if necessary, set the accessed and dirty bits */
            target_ulong updated_pte = pte | PTE_A |
                (access_type == MMU_DATA_STORE ? PTE_D : 0);

            /* page table updates need to be atomic with MTTCG enabled */
            if (updated_pte != pte) {
                /*
                 * - if accessed or dirty bits need updating, and the PTE is
                 *   in RAM, then we do so atomically with a compare and swap.
                 * - if the PTE is in IO space or ROM, then it can't be updated
                 *   and we return TRANSLATE_FAIL.
                 * - if the PTE changed by the time we went to update it, then
                 *   it is racing with another hart and we must restart.
                 */
                MemoryRegion *mr;
                hwaddr l = sizeof(target_ulong), addr1;
                mr = address_space_translate(cs->as, pte_addr,
                    &addr1, &l, false, MEMTXATTRS_UNSPECIFIED);
                if (memory_region_is_ram(mr)) {
                    target_ulong *pte_pa =
                        qemu_map_ram_ptr(mr->ram_block, addr1);
#if TCG_OVERSIZED_GUEST
                    /* MTTCG is not enabled on oversized TCG guests so
                     * page table updates do not need to be atomic */
                    *pte_pa = pte = updated_pte;
#else
                    target_ulong old_pte =
                        atomic_cmpxchg(pte_pa, pte, updated_pte);
                    if (old_pte != pte) {
                        goto restart;
                    } else {
                        pte = updated_pte;
                    }
#endif
                } else {
                    /* misconfigured PTE in ROM (AD bits are not preset)
                     * or PTE is in IO space and can't be updated atomically */
                    return TRANSLATE_FAIL;
                }
            }

            /* for superpage mappings, make a fake leaf PTE for the TLB's
             * benefit */
            target_ulong vpn = addr >> PGSHIFT;
            *physical = (ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT;

            /* set permissions on the TLB entry */
            if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
                *prot |= PAGE_READ;
            }
            if ((pte & PTE_X)) {
                *prot |= PAGE_EXEC;
            }
            /* add write permission on stores or if the page is already
             * dirty, so that we TLB miss on later writes to update the
             * dirty bit */
            if ((pte & PTE_W) &&
                (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
                *prot |= PAGE_WRITE;
            }
            return TRANSLATE_SUCCESS;
        }
    }
    return TRANSLATE_FAIL;
}

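/* Record the exception corresponding to a failed translation: a page fault
 * when a priv-1.10 translation mode is active, otherwise an access fault.
 * The faulting address is stored in env->badaddr. */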
static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
                                MMUAccessType access_type)
{
    CPUState *cs = CPU(riscv_env_get_cpu(env));
    int page_fault_exceptions =
        (env->priv_ver >= PRIV_VERSION_1_10_0) &&
        get_field(env->satp, SATP_MODE) != VM_1_10_MBARE;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = page_fault_exceptions ?
            RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = page_fault_exceptions ?
            RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = page_fault_exceptions ?
            RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
}

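/* Translate a guest virtual address for the gdbstub/monitor without raising
 * a guest exception. Returns -1 if there is no valid mapping. */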
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    hwaddr phys_addr;
    int prot;
    int mmu_idx = cpu_mmu_index(&cpu->env, false);

    if (get_physical_address(&cpu->env, &phys_addr, &prot, addr, 0, mmu_idx)) {
        return -1;
    }
    return phys_addr;
}

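/* Raise the address-misaligned exception that matches the faulting access
 * type and unwind to the guest. */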
void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type, int mmu_idx,
                                   uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = addr;
    riscv_raise_exception(env, cs->exception_index, retaddr);
}

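/* Called by the softmmu core on a TLB miss: either install the translation
 * or raise the recorded exception at the faulting instruction. */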
void tlb_fill(CPUState *cs, target_ulong addr, int size,
              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    int ret;
    ret = riscv_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
    if (ret == TRANSLATE_FAIL) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        riscv_raise_exception(env, cs->exception_index, retaddr);
    }
}

#endif

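/* Translate @address for an access of type @rw at privilege level @mmu_idx.
 * On softmmu builds this performs the page-table walk and PMP check and, on
 * success, installs the mapping in the TCG TLB; on failure it records the
 * exception to raise. User-only builds always report a page fault. */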
int riscv_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
                               int rw, int mmu_idx)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
#if !defined(CONFIG_USER_ONLY)
    hwaddr pa = 0;
    int prot;
#endif
    int ret = TRANSLATE_FAIL;

    qemu_log_mask(CPU_LOG_MMU,
                  "%s pc " TARGET_FMT_lx " ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, env->pc, address, rw, mmu_idx);

#if !defined(CONFIG_USER_ONLY)
    ret = get_physical_address(env, &pa, &prot, address, rw, mmu_idx);
    qemu_log_mask(CPU_LOG_MMU,
                  "%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx
                  " prot %d\n", __func__, address, ret, pa, prot);
    /* a PMP violation on the translated physical address also fails the
     * access */
    if (riscv_feature(env, RISCV_FEATURE_PMP) &&
        !pmp_hart_has_privs(env, pa, TARGET_PAGE_SIZE, 1 << rw)) {
        ret = TRANSLATE_FAIL;
    }
    if (ret == TRANSLATE_SUCCESS) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
                     prot, mmu_idx, TARGET_PAGE_SIZE);
    } else if (ret == TRANSLATE_FAIL) {
        raise_mmu_exception(env, address, rw);
    }
#else
    switch (rw) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_PAGE_FAULT;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
        break;
    }
#endif
    return ret;
}
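/*
 * Handle a trap: update the CSRs of the privilege mode that will take the
 * trap (S-mode if the cause is delegated, otherwise M-mode) and redirect
 * the PC to that mode's trap vector.
 */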
void riscv_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)

    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* cs->exception_index is 32 bits wide, unlike mcause which is XLEN bits
     * wide, so the MSB is used as an interrupt flag rather than the real
     * mcause interrupt bit; split it into trap type and cause here. */
    bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
    target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
    target_ulong deleg = async ? env->mideleg : env->medeleg;
    target_ulong tval = 0;

    static const int ecall_cause_map[] = {
        [PRV_U] = RISCV_EXCP_U_ECALL,
        [PRV_S] = RISCV_EXCP_S_ECALL,
        [PRV_H] = RISCV_EXCP_H_ECALL,
        [PRV_M] = RISCV_EXCP_M_ECALL
    };

    if (!async) {
        /* set tval to badaddr for traps with address information */
        switch (cause) {
        case RISCV_EXCP_INST_ADDR_MIS:
        case RISCV_EXCP_INST_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_ADDR_MIS:
        case RISCV_EXCP_STORE_AMO_ADDR_MIS:
        case RISCV_EXCP_LOAD_ACCESS_FAULT:
        case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
        case RISCV_EXCP_INST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_PAGE_FAULT:
        case RISCV_EXCP_STORE_PAGE_FAULT:
            tval = env->badaddr;
            break;
        default:
            break;
        }
        /* ecall is dispatched as one cause so translate based on mode */
        if (cause == RISCV_EXCP_U_ECALL) {
            assert(env->priv <= 3);
            cause = ecall_cause_map[env->priv];
        }
    }

    trace_riscv_trap(env->mhartid, async, cause, env->pc, tval, cause < 16 ?
        (async ? riscv_intr_names : riscv_excp_names)[cause] : "(unknown)");

    if (env->priv <= PRV_S &&
            cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
        /* handle the trap in S-mode */
        target_ulong s = env->mstatus;
        s = set_field(s, MSTATUS_SPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ?
            get_field(s, MSTATUS_SIE) : get_field(s, MSTATUS_UIE << env->priv));
        s = set_field(s, MSTATUS_SPP, env->priv);
        s = set_field(s, MSTATUS_SIE, 0);
        env->mstatus = s;
        /* for asynchronous traps, set the interrupt (MSB) bit of scause */
        env->scause = cause | ~(((target_ulong)-1) >> async);
        env->sepc = env->pc;
        env->sbadaddr = tval;
        /* vectored interrupts jump to stvec + 4 * cause when stvec.MODE == 1 */
        env->pc = (env->stvec >> 2 << 2) +
            ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_S);
    } else {
        /* handle the trap in M-mode */
        target_ulong s = env->mstatus;
        s = set_field(s, MSTATUS_MPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ?
            get_field(s, MSTATUS_MIE) : get_field(s, MSTATUS_UIE << env->priv));
        s = set_field(s, MSTATUS_MPP, env->priv);
        s = set_field(s, MSTATUS_MIE, 0);
        env->mstatus = s;
        env->mcause = cause | ~(((target_ulong)-1) >> async);
        env->mepc = env->pc;
        env->mbadaddr = tval;
        env->pc = (env->mtvec >> 2 << 2) +
            ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_M);
    }

#endif
    cs->exception_index = EXCP_NONE;
}