1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include "qemu/osdep.h"
20#include <sys/ioctl.h>
21
22#include <linux/kvm.h>
23
24#include "qemu/timer.h"
25#include "qapi/error.h"
26#include "qemu/error-report.h"
27#include "qemu/main-loop.h"
28#include "qapi/visitor.h"
29#include "sysemu/sysemu.h"
30#include "sysemu/kvm.h"
31#include "sysemu/kvm_int.h"
32#include "cpu.h"
33#include "trace.h"
34#include "hw/pci/pci.h"
35#include "exec/memattrs.h"
36#include "exec/address-spaces.h"
37#include "hw/boards.h"
38#include "hw/irq.h"
39#include "qemu/log.h"
40#include "hw/loader.h"
41#include "kvm_riscv.h"
42#include "sbi_ecall_interface.h"
43#include "chardev/char-fe.h"
44#include "migration/migration.h"
45#include "sysemu/runstate.h"
46
47static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
48 uint64_t idx)
49{
50 uint64_t id = KVM_REG_RISCV | type | idx;
51
52 switch (riscv_cpu_mxl(env)) {
53 case MXL_RV32:
54 id |= KVM_REG_SIZE_U32;
55 break;
56 case MXL_RV64:
57 id |= KVM_REG_SIZE_U64;
58 break;
59 default:
60 g_assert_not_reached();
61 }
62 return id;
63}
64
/* Convenience builders for the id of each KVM RISC-V register class. */
#define RISCV_CORE_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, \
                                                   KVM_REG_RISCV_CORE_REG(name))

#define RISCV_CSR_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_CSR, \
                                                  KVM_REG_RISCV_CSR_REG(name))

#define RISCV_TIMER_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_TIMER, \
                                                    KVM_REG_RISCV_TIMER_REG(name))

#define RISCV_FP_F_REG(env, idx) kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_F, idx)

#define RISCV_FP_D_REG(env, idx) kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_D, idx)

/*
 * Read/write a supervisor CSR through KVM. On failure these expand to
 * 'return ret', so they may only be used in functions returning int.
 */
#define KVM_RISCV_GET_CSR(cs, env, csr, reg) \
    do { \
        int ret = kvm_get_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
        if (ret) { \
            return ret; \
        } \
    } while (0)

#define KVM_RISCV_SET_CSR(cs, env, csr, reg) \
    do { \
        int ret = kvm_set_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
        if (ret) { \
            return ret; \
        } \
    } while (0)

/*
 * Read/write a KVM timer register. A failure here is treated as fatal
 * (abort()) rather than propagated to the caller.
 */
#define KVM_RISCV_GET_TIMER(cs, env, name, reg) \
    do { \
        int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(env, name), &reg); \
        if (ret) { \
            abort(); \
        } \
    } while (0)

#define KVM_RISCV_SET_TIMER(cs, env, name, reg) \
    do { \
        int ret = kvm_set_one_reg(cs, RISCV_TIMER_REG(env, name), &reg); \
        if (ret) { \
            abort(); \
        } \
    } while (0)
109
/*
 * Descriptor for one user-configurable CPU option mediated through KVM:
 * a MISA bit, a multi-letter ISA extension, or a cache block size.
 */
typedef struct KVMCPUConfig {
    const char *name;        /* QOM property name exposed to the user */
    const char *description; /* QOM property description */
    target_ulong offset;     /* MISA bit mask, or offset into RISCVCPUConfig */
    int kvm_reg_id;          /* KVM register id used to read/write the option */
    bool user_set;           /* user changed the value away from the host's */
    bool supported;          /* host KVM recognizes this extension register */
} KVMCPUConfig;
118
/* 'offset' doubles as the MISA bit mask for single-letter extensions. */
#define KVM_MISA_CFG(_bit, _reg_id) \
    {.offset = _bit, .kvm_reg_id = _reg_id}

/* MISA (single-letter) extensions KVM lets us toggle; names/descriptions
 * are filled in at property-registration time. */
static KVMCPUConfig kvm_misa_ext_cfgs[] = {
    KVM_MISA_CFG(RVA, KVM_RISCV_ISA_EXT_A),
    KVM_MISA_CFG(RVC, KVM_RISCV_ISA_EXT_C),
    KVM_MISA_CFG(RVD, KVM_RISCV_ISA_EXT_D),
    KVM_MISA_CFG(RVF, KVM_RISCV_ISA_EXT_F),
    KVM_MISA_CFG(RVH, KVM_RISCV_ISA_EXT_H),
    KVM_MISA_CFG(RVI, KVM_RISCV_ISA_EXT_I),
    KVM_MISA_CFG(RVM, KVM_RISCV_ISA_EXT_M),
};
132
/*
 * QOM setter for a single MISA extension bit. Disabling a host-enabled
 * bit is recorded and applied later at vcpu init; enabling a bit the
 * host does not have is rejected with an error.
 */
static void kvm_cpu_set_misa_ext_cfg(Object *obj, Visitor *v,
                                     const char *name,
                                     void *opaque, Error **errp)
{
    KVMCPUConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->offset;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value, host_bit;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    host_bit = env->misa_ext_mask & misa_bit;

    /* Nothing to do if the requested value already matches the host. */
    if (value == host_bit) {
        return;
    }

    if (!value) {
        /* Disabling a host-enabled bit: defer the KVM write to vcpu init. */
        misa_ext_cfg->user_set = true;
        return;
    }

    /*
     * The remaining case is enabling a bit the host lacks, which KVM
     * cannot do: the host's ISA is the upper bound of what a guest vcpu
     * can be given.
     */
    error_setg(errp, "Enabling MISA bit '%s' is not allowed: it's not "
               "enabled in the host", misa_ext_cfg->name);
}
165
166static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs)
167{
168 CPURISCVState *env = &cpu->env;
169 uint64_t id, reg;
170 int i, ret;
171
172 for (i = 0; i < ARRAY_SIZE(kvm_misa_ext_cfgs); i++) {
173 KVMCPUConfig *misa_cfg = &kvm_misa_ext_cfgs[i];
174 target_ulong misa_bit = misa_cfg->offset;
175
176 if (!misa_cfg->user_set) {
177 continue;
178 }
179
180
181 reg = 0;
182 id = kvm_riscv_reg_id(env, KVM_REG_RISCV_ISA_EXT,
183 misa_cfg->kvm_reg_id);
184 ret = kvm_set_one_reg(cs, id, ®);
185 if (ret != 0) {
186
187
188
189
190
191
192
193 error_report("Unable to set KVM reg %s, error %d",
194 misa_cfg->name, ret);
195 exit(EXIT_FAILURE);
196 }
197 env->misa_ext &= ~misa_bit;
198 }
199}
200
/* Offset of a RISCVCPUConfig member, stored in KVMCPUConfig.offset. */
#define CPUCFG(_prop) offsetof(struct RISCVCPUConfig, _prop)

#define KVM_EXT_CFG(_name, _prop, _reg_id) \
    {.name = _name, .offset = CPUCFG(_prop), \
     .kvm_reg_id = _reg_id}

/* Multi-letter ISA extensions exposed as KVM-backed QOM properties. */
static KVMCPUConfig kvm_multi_ext_cfgs[] = {
    KVM_EXT_CFG("zicbom", ext_icbom, KVM_RISCV_ISA_EXT_ZICBOM),
    KVM_EXT_CFG("zicboz", ext_icboz, KVM_RISCV_ISA_EXT_ZICBOZ),
    KVM_EXT_CFG("zihintpause", ext_zihintpause, KVM_RISCV_ISA_EXT_ZIHINTPAUSE),
    KVM_EXT_CFG("zbb", ext_zbb, KVM_RISCV_ISA_EXT_ZBB),
    KVM_EXT_CFG("ssaia", ext_ssaia, KVM_RISCV_ISA_EXT_SSAIA),
    KVM_EXT_CFG("sstc", ext_sstc, KVM_RISCV_ISA_EXT_SSTC),
    KVM_EXT_CFG("svinval", ext_svinval, KVM_RISCV_ISA_EXT_SVINVAL),
    KVM_EXT_CFG("svpbmt", ext_svpbmt, KVM_RISCV_ISA_EXT_SVPBMT),
};
217
218static void *kvmconfig_get_cfg_addr(RISCVCPU *cpu, KVMCPUConfig *kvmcfg)
219{
220 return (void *)&cpu->cfg + kvmcfg->offset;
221}
222
223static void kvm_cpu_cfg_set(RISCVCPU *cpu, KVMCPUConfig *multi_ext,
224 uint32_t val)
225{
226 bool *ext_enabled = kvmconfig_get_cfg_addr(cpu, multi_ext);
227
228 *ext_enabled = val;
229}
230
231static uint32_t kvm_cpu_cfg_get(RISCVCPU *cpu,
232 KVMCPUConfig *multi_ext)
233{
234 bool *ext_enabled = kvmconfig_get_cfg_addr(cpu, multi_ext);
235
236 return *ext_enabled;
237}
238
/*
 * QOM setter for a multi-letter extension. Changes are recorded in
 * cpu->cfg and flagged via user_set; the actual KVM write happens at
 * vcpu init in kvm_riscv_update_cpu_cfg_isa_ext().
 */
static void kvm_cpu_set_multi_ext_cfg(Object *obj, Visitor *v,
                                      const char *name,
                                      void *opaque, Error **errp)
{
    KVMCPUConfig *multi_ext_cfg = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value, host_val;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    host_val = kvm_cpu_cfg_get(cpu, multi_ext_cfg);

    /* No-op if the requested value matches what the host already has. */
    if (value == host_val) {
        return;
    }

    if (!multi_ext_cfg->supported) {
        /*
         * The host KVM does not know this extension register at all.
         * Disabling is then a harmless no-op; enabling is impossible.
         */
        if (value) {
            error_setg(errp, "KVM does not support disabling extension %s",
                       multi_ext_cfg->name);
        }

        return;
    }

    multi_ext_cfg->user_set = true;
    kvm_cpu_cfg_set(cpu, multi_ext_cfg, value);
}
278
/* Host-fixed Zicbom cache-block size; exposed read-mostly (see setter). */
static KVMCPUConfig kvm_cbom_blocksize = {
    .name = "cbom_blocksize",
    .offset = CPUCFG(cbom_blocksize),
    .kvm_reg_id = KVM_REG_RISCV_CONFIG_REG(zicbom_block_size)
};

/* Host-fixed Zicboz cache-block size; exposed read-mostly (see setter). */
static KVMCPUConfig kvm_cboz_blocksize = {
    .name = "cboz_blocksize",
    .offset = CPUCFG(cboz_blocksize),
    .kvm_reg_id = KVM_REG_RISCV_CONFIG_REG(zicboz_block_size)
};
290
/*
 * QOM setter shared by cbom_blocksize/cboz_blocksize. The block size is
 * a host property: any value other than the one already read from the
 * host is a fatal configuration error.
 */
static void kvm_cpu_set_cbomz_blksize(Object *obj, Visitor *v,
                                      const char *name,
                                      void *opaque, Error **errp)
{
    KVMCPUConfig *cbomz_cfg = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint16_t value, *host_val;

    if (!visit_type_uint16(v, name, &value, errp)) {
        return;
    }

    host_val = kvmconfig_get_cfg_addr(cpu, cbomz_cfg);

    if (value != *host_val) {
        error_report("Unable to set %s to a different value than "
                     "the host (%u)",
                     cbomz_cfg->name, *host_val);
        exit(EXIT_FAILURE);
    }

    cbomz_cfg->user_set = true;
}
314
315static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs)
316{
317 CPURISCVState *env = &cpu->env;
318 uint64_t id, reg;
319 int i, ret;
320
321 for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
322 KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i];
323
324 if (!multi_ext_cfg->user_set) {
325 continue;
326 }
327
328 id = kvm_riscv_reg_id(env, KVM_REG_RISCV_ISA_EXT,
329 multi_ext_cfg->kvm_reg_id);
330 reg = kvm_cpu_cfg_get(cpu, multi_ext_cfg);
331 ret = kvm_set_one_reg(cs, id, ®);
332 if (ret != 0) {
333 error_report("Unable to %s extension %s in KVM, error %d",
334 reg ? "enable" : "disable",
335 multi_ext_cfg->name, ret);
336 exit(EXIT_FAILURE);
337 }
338 }
339}
340
/*
 * Register all KVM-mediated user properties on the CPU object: MISA
 * bits, multi-letter extensions and the cbom/cboz block sizes. All
 * properties are write-only here (NULL getters); reads go through the
 * regular cpu->cfg fields.
 */
static void kvm_riscv_add_cpu_user_properties(Object *cpu_obj)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(kvm_misa_ext_cfgs); i++) {
        KVMCPUConfig *misa_cfg = &kvm_misa_ext_cfgs[i];
        int bit = misa_cfg->offset;

        /* Fill in the name/description deferred by KVM_MISA_CFG(). */
        misa_cfg->name = riscv_get_misa_ext_name(bit);
        misa_cfg->description = riscv_get_misa_ext_description(bit);

        object_property_add(cpu_obj, misa_cfg->name, "bool",
                            NULL,
                            kvm_cpu_set_misa_ext_cfg,
                            NULL, misa_cfg);
        object_property_set_description(cpu_obj, misa_cfg->name,
                                        misa_cfg->description);
    }

    for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
        KVMCPUConfig *multi_cfg = &kvm_multi_ext_cfgs[i];

        object_property_add(cpu_obj, multi_cfg->name, "bool",
                            NULL,
                            kvm_cpu_set_multi_ext_cfg,
                            NULL, multi_cfg);
    }

    object_property_add(cpu_obj, "cbom_blocksize", "uint16",
                        NULL, kvm_cpu_set_cbomz_blksize,
                        NULL, &kvm_cbom_blocksize);

    object_property_add(cpu_obj, "cboz_blocksize", "uint16",
                        NULL, kvm_cpu_set_cbomz_blksize,
                        NULL, &kvm_cboz_blocksize);
}
377
378static int kvm_riscv_get_regs_core(CPUState *cs)
379{
380 int ret = 0;
381 int i;
382 target_ulong reg;
383 CPURISCVState *env = &RISCV_CPU(cs)->env;
384
385 ret = kvm_get_one_reg(cs, RISCV_CORE_REG(env, regs.pc), ®);
386 if (ret) {
387 return ret;
388 }
389 env->pc = reg;
390
391 for (i = 1; i < 32; i++) {
392 uint64_t id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, i);
393 ret = kvm_get_one_reg(cs, id, ®);
394 if (ret) {
395 return ret;
396 }
397 env->gpr[i] = reg;
398 }
399
400 return ret;
401}
402
403static int kvm_riscv_put_regs_core(CPUState *cs)
404{
405 int ret = 0;
406 int i;
407 target_ulong reg;
408 CPURISCVState *env = &RISCV_CPU(cs)->env;
409
410 reg = env->pc;
411 ret = kvm_set_one_reg(cs, RISCV_CORE_REG(env, regs.pc), ®);
412 if (ret) {
413 return ret;
414 }
415
416 for (i = 1; i < 32; i++) {
417 uint64_t id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, i);
418 reg = env->gpr[i];
419 ret = kvm_set_one_reg(cs, id, ®);
420 if (ret) {
421 return ret;
422 }
423 }
424
425 return ret;
426}
427
/*
 * Read the supervisor CSRs from KVM. The macros return early on error.
 * Note the guest's S-mode CSRs are mirrored into the M-mode-named env
 * fields (sstatus -> mstatus, sie -> mie, sip -> mip).
 */
static int kvm_riscv_get_regs_csr(CPUState *cs)
{
    int ret = 0;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    KVM_RISCV_GET_CSR(cs, env, sstatus, env->mstatus);
    KVM_RISCV_GET_CSR(cs, env, sie, env->mie);
    KVM_RISCV_GET_CSR(cs, env, stvec, env->stvec);
    KVM_RISCV_GET_CSR(cs, env, sscratch, env->sscratch);
    KVM_RISCV_GET_CSR(cs, env, sepc, env->sepc);
    KVM_RISCV_GET_CSR(cs, env, scause, env->scause);
    KVM_RISCV_GET_CSR(cs, env, stval, env->stval);
    KVM_RISCV_GET_CSR(cs, env, sip, env->mip);
    KVM_RISCV_GET_CSR(cs, env, satp, env->satp);
    return ret;
}
444
/*
 * Write the supervisor CSRs to KVM; mirror of kvm_riscv_get_regs_csr().
 * The macros return early on error.
 */
static int kvm_riscv_put_regs_csr(CPUState *cs)
{
    int ret = 0;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    KVM_RISCV_SET_CSR(cs, env, sstatus, env->mstatus);
    KVM_RISCV_SET_CSR(cs, env, sie, env->mie);
    KVM_RISCV_SET_CSR(cs, env, stvec, env->stvec);
    KVM_RISCV_SET_CSR(cs, env, sscratch, env->sscratch);
    KVM_RISCV_SET_CSR(cs, env, sepc, env->sepc);
    KVM_RISCV_SET_CSR(cs, env, scause, env->scause);
    KVM_RISCV_SET_CSR(cs, env, stval, env->stval);
    KVM_RISCV_SET_CSR(cs, env, sip, env->mip);
    KVM_RISCV_SET_CSR(cs, env, satp, env->satp);

    return ret;
}
462
463static int kvm_riscv_get_regs_fp(CPUState *cs)
464{
465 int ret = 0;
466 int i;
467 CPURISCVState *env = &RISCV_CPU(cs)->env;
468
469 if (riscv_has_ext(env, RVD)) {
470 uint64_t reg;
471 for (i = 0; i < 32; i++) {
472 ret = kvm_get_one_reg(cs, RISCV_FP_D_REG(env, i), ®);
473 if (ret) {
474 return ret;
475 }
476 env->fpr[i] = reg;
477 }
478 return ret;
479 }
480
481 if (riscv_has_ext(env, RVF)) {
482 uint32_t reg;
483 for (i = 0; i < 32; i++) {
484 ret = kvm_get_one_reg(cs, RISCV_FP_F_REG(env, i), ®);
485 if (ret) {
486 return ret;
487 }
488 env->fpr[i] = reg;
489 }
490 return ret;
491 }
492
493 return ret;
494}
495
496static int kvm_riscv_put_regs_fp(CPUState *cs)
497{
498 int ret = 0;
499 int i;
500 CPURISCVState *env = &RISCV_CPU(cs)->env;
501
502 if (riscv_has_ext(env, RVD)) {
503 uint64_t reg;
504 for (i = 0; i < 32; i++) {
505 reg = env->fpr[i];
506 ret = kvm_set_one_reg(cs, RISCV_FP_D_REG(env, i), ®);
507 if (ret) {
508 return ret;
509 }
510 }
511 return ret;
512 }
513
514 if (riscv_has_ext(env, RVF)) {
515 uint32_t reg;
516 for (i = 0; i < 32; i++) {
517 reg = env->fpr[i];
518 ret = kvm_set_one_reg(cs, RISCV_FP_F_REG(env, i), ®);
519 if (ret) {
520 return ret;
521 }
522 }
523 return ret;
524 }
525
526 return ret;
527}
528
/*
 * Snapshot the KVM timer registers into env, once per stop: the dirty
 * flag prevents a second read from overwriting a pending snapshot
 * before kvm_riscv_put_regs_timer() has written it back.
 */
static void kvm_riscv_get_regs_timer(CPUState *cs)
{
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (env->kvm_timer_dirty) {
        return;
    }

    KVM_RISCV_GET_TIMER(cs, env, time, env->kvm_timer_time);
    KVM_RISCV_GET_TIMER(cs, env, compare, env->kvm_timer_compare);
    KVM_RISCV_GET_TIMER(cs, env, state, env->kvm_timer_state);
    KVM_RISCV_GET_TIMER(cs, env, frequency, env->kvm_timer_frequency);

    env->kvm_timer_dirty = true;
}
544
/*
 * Restore the timer snapshot taken by kvm_riscv_get_regs_timer() when
 * the VM resumes. Only runs if a snapshot is pending (dirty flag).
 */
static void kvm_riscv_put_regs_timer(CPUState *cs)
{
    uint64_t reg;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (!env->kvm_timer_dirty) {
        return;
    }

    KVM_RISCV_SET_TIMER(cs, env, time, env->kvm_timer_time);
    KVM_RISCV_SET_TIMER(cs, env, compare, env->kvm_timer_compare);

    /*
     * NOTE(review): 'state' is only written when non-zero — presumably
     * because KVM rejects writing a zero timer state; confirm against
     * the host KVM implementation before changing this.
     */
    if (env->kvm_timer_state) {
        KVM_RISCV_SET_TIMER(cs, env, state, env->kvm_timer_state);
    }

    /*
     * During an incoming migration, check that the destination host's
     * timer frequency matches the snapshot taken on the source; a
     * mismatch is only reported, not fixed up.
     */
    if (migration_is_running(migrate_get_current()->state)) {
        KVM_RISCV_GET_TIMER(cs, env, frequency, reg);
        if (reg != env->kvm_timer_frequency) {
            error_report("Dst Hosts timer frequency != Src Hosts");
        }
    }

    env->kvm_timer_dirty = false;
}
581
/* File descriptors of a throwaway vcpu used to probe host capabilities. */
typedef struct KVMScratchCPU {
    int kvmfd;  /* /dev/kvm */
    int vmfd;   /* KVM_CREATE_VM result */
    int cpufd;  /* KVM_CREATE_VCPU result */
} KVMScratchCPU;
587
588
589
590
591
/*
 * Create a transient "scratch" vcpu used only to query host KVM
 * capabilities before the real machine is built. On failure, returns
 * false with no file descriptors left open.
 */
static bool kvm_riscv_create_scratch_vcpu(KVMScratchCPU *scratch)
{
    int kvmfd = -1, vmfd = -1, cpufd = -1;

    kvmfd = qemu_open_old("/dev/kvm", O_RDWR);
    if (kvmfd < 0) {
        goto err;
    }
    /* KVM_CREATE_VM may be interrupted by a signal; retry on EINTR. */
    do {
        vmfd = ioctl(kvmfd, KVM_CREATE_VM, 0);
    } while (vmfd == -1 && errno == EINTR);
    if (vmfd < 0) {
        goto err;
    }
    cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
    if (cpufd < 0) {
        goto err;
    }

    scratch->kvmfd = kvmfd;
    scratch->vmfd = vmfd;
    scratch->cpufd = cpufd;

    return true;

 err:
    /* Unwind whichever fds were opened before the failure. */
    if (cpufd >= 0) {
        close(cpufd);
    }
    if (vmfd >= 0) {
        close(vmfd);
    }
    if (kvmfd >= 0) {
        close(kvmfd);
    }

    return false;
}
630
/* Tear down a scratch vcpu: close the fds in reverse creation order. */
static void kvm_riscv_destroy_scratch_vcpu(KVMScratchCPU *scratch)
{
    close(scratch->cpufd);
    close(scratch->vmfd);
    close(scratch->kvmfd);
}
637
638static void kvm_riscv_init_machine_ids(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
639{
640 CPURISCVState *env = &cpu->env;
641 struct kvm_one_reg reg;
642 int ret;
643
644 reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
645 KVM_REG_RISCV_CONFIG_REG(mvendorid));
646 reg.addr = (uint64_t)&cpu->cfg.mvendorid;
647 ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®);
648 if (ret != 0) {
649 error_report("Unable to retrieve mvendorid from host, error %d", ret);
650 }
651
652 reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
653 KVM_REG_RISCV_CONFIG_REG(marchid));
654 reg.addr = (uint64_t)&cpu->cfg.marchid;
655 ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®);
656 if (ret != 0) {
657 error_report("Unable to retrieve marchid from host, error %d", ret);
658 }
659
660 reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
661 KVM_REG_RISCV_CONFIG_REG(mimpid));
662 reg.addr = (uint64_t)&cpu->cfg.mimpid;
663 ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®);
664 if (ret != 0) {
665 error_report("Unable to retrieve mimpid from host, error %d", ret);
666 }
667}
668
669static void kvm_riscv_init_misa_ext_mask(RISCVCPU *cpu,
670 KVMScratchCPU *kvmcpu)
671{
672 CPURISCVState *env = &cpu->env;
673 struct kvm_one_reg reg;
674 int ret;
675
676 reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
677 KVM_REG_RISCV_CONFIG_REG(isa));
678 reg.addr = (uint64_t)&env->misa_ext_mask;
679 ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®);
680
681 if (ret) {
682 error_report("Unable to fetch ISA register from KVM, "
683 "error %d", ret);
684 kvm_riscv_destroy_scratch_vcpu(kvmcpu);
685 exit(EXIT_FAILURE);
686 }
687
688 env->misa_ext = env->misa_ext_mask;
689}
690
691static void kvm_riscv_read_cbomz_blksize(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
692 KVMCPUConfig *cbomz_cfg)
693{
694 CPURISCVState *env = &cpu->env;
695 struct kvm_one_reg reg;
696 int ret;
697
698 reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
699 cbomz_cfg->kvm_reg_id);
700 reg.addr = (uint64_t)kvmconfig_get_cfg_addr(cpu, cbomz_cfg);
701 ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®);
702 if (ret != 0) {
703 error_report("Unable to read KVM reg %s, error %d",
704 cbomz_cfg->name, ret);
705 exit(EXIT_FAILURE);
706 }
707}
708
709static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
710{
711 CPURISCVState *env = &cpu->env;
712 uint64_t val;
713 int i, ret;
714
715 for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
716 KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i];
717 struct kvm_one_reg reg;
718
719 reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_ISA_EXT,
720 multi_ext_cfg->kvm_reg_id);
721 reg.addr = (uint64_t)&val;
722 ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®);
723 if (ret != 0) {
724 if (errno == EINVAL) {
725
726 multi_ext_cfg->supported = false;
727 val = false;
728 } else {
729 error_report("Unable to read ISA_EXT KVM register %s, "
730 "error %d", multi_ext_cfg->name, ret);
731 kvm_riscv_destroy_scratch_vcpu(kvmcpu);
732 exit(EXIT_FAILURE);
733 }
734 } else {
735 multi_ext_cfg->supported = true;
736 }
737
738 kvm_cpu_cfg_set(cpu, multi_ext_cfg, val);
739 }
740
741 if (cpu->cfg.ext_icbom) {
742 kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cbom_blocksize);
743 }
744
745 if (cpu->cfg.ext_icboz) {
746 kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cboz_blocksize);
747 }
748}
749
/*
 * Entry point called at CPU object creation: spin up a scratch vcpu,
 * register the KVM-backed user properties and seed them from the host,
 * then tear the scratch vcpu down. Silently does nothing if the
 * scratch vcpu cannot be created (e.g. no /dev/kvm).
 */
void kvm_riscv_init_user_properties(Object *cpu_obj)
{
    RISCVCPU *cpu = RISCV_CPU(cpu_obj);
    KVMScratchCPU kvmcpu;

    if (!kvm_riscv_create_scratch_vcpu(&kvmcpu)) {
        return;
    }

    kvm_riscv_add_cpu_user_properties(cpu_obj);
    kvm_riscv_init_machine_ids(cpu, &kvmcpu);
    kvm_riscv_init_misa_ext_mask(cpu, &kvmcpu);
    kvm_riscv_init_multiext_cfg(cpu, &kvmcpu);

    kvm_riscv_destroy_scratch_vcpu(&kvmcpu);
}
766
/* No extra KVM capabilities are required beyond the common set. */
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};
770
771int kvm_arch_get_registers(CPUState *cs)
772{
773 int ret = 0;
774
775 ret = kvm_riscv_get_regs_core(cs);
776 if (ret) {
777 return ret;
778 }
779
780 ret = kvm_riscv_get_regs_csr(cs);
781 if (ret) {
782 return ret;
783 }
784
785 ret = kvm_riscv_get_regs_fp(cs);
786 if (ret) {
787 return ret;
788 }
789
790 return ret;
791}
792
793int kvm_arch_put_registers(CPUState *cs, int level)
794{
795 int ret = 0;
796
797 ret = kvm_riscv_put_regs_core(cs);
798 if (ret) {
799 return ret;
800 }
801
802 ret = kvm_riscv_put_regs_csr(cs);
803 if (ret) {
804 return ret;
805 }
806
807 ret = kvm_riscv_put_regs_fp(cs);
808 if (ret) {
809 return ret;
810 }
811
812 return ret;
813}
814
/* Nothing to release after removing an irqfd route on RISC-V. */
int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

/* No MSI route fixups needed on RISC-V. */
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}

/* No per-vcpu teardown needed. */
int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

/* The KVM vcpu id is simply QEMU's cpu index. */
unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}
835
/*
 * VM state change hook: snapshot the KVM timer when the VM stops and
 * restore it when the VM resumes, so guest time does not advance while
 * the VM is paused.
 */
static void kvm_riscv_vm_state_change(void *opaque, bool running,
                                      RunState state)
{
    CPUState *cs = opaque;

    if (running) {
        kvm_riscv_put_regs_timer(cs);
    } else {
        kvm_riscv_get_regs_timer(cs);
    }
}

/* No userspace irq routing setup required. */
void kvm_arch_init_irq_routing(KVMState *s)
{
}
851
852static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs)
853{
854 CPURISCVState *env = &cpu->env;
855 target_ulong reg;
856 uint64_t id;
857 int ret;
858
859 id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
860 KVM_REG_RISCV_CONFIG_REG(mvendorid));
861
862
863
864
865
866 reg = cpu->cfg.mvendorid;
867 ret = kvm_set_one_reg(cs, id, ®);
868 if (ret != 0) {
869 return ret;
870 }
871
872 id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
873 KVM_REG_RISCV_CONFIG_REG(marchid));
874 ret = kvm_set_one_reg(cs, id, &cpu->cfg.marchid);
875 if (ret != 0) {
876 return ret;
877 }
878
879 id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
880 KVM_REG_RISCV_CONFIG_REG(mimpid));
881 ret = kvm_set_one_reg(cs, id, &cpu->cfg.mimpid);
882
883 return ret;
884}
885
/*
 * Per-vcpu init: hook timer save/restore into VM state changes, push
 * machine ids for named CPU models (the 'host' model keeps the host's
 * own ids), then apply any user-requested ISA changes.
 */
int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret = 0;
    RISCVCPU *cpu = RISCV_CPU(cs);

    qemu_add_vm_change_state_handler(kvm_riscv_vm_state_change, cs);

    if (!object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST)) {
        ret = kvm_vcpu_set_machine_ids(cpu, cs);
        if (ret != 0) {
            return ret;
        }
    }

    kvm_riscv_update_cpu_misa_ext(cpu, cs);
    kvm_riscv_update_cpu_cfg_isa_ext(cpu, cs);

    return ret;
}
905
/* MSI data to GSI translation is not supported on RISC-V. */
int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}

/* No post-processing after adding an MSI route. */
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

/* Default KVM VM type. */
int kvm_arch_get_default_type(MachineState *ms)
{
    return 0;
}

/* No arch-specific KVM initialization needed. */
int kvm_arch_init(MachineState *ms, KVMState *s)
{
    return 0;
}

/* No in-kernel irqchip model is created here. */
int kvm_arch_irqchip_create(KVMState *s)
{
    return 0;
}

/* No async events to process outside of KVM_RUN. */
int kvm_arch_process_async_events(CPUState *cs)
{
    return 0;
}

/* Nothing to do before entering KVM_RUN. */
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
}

/* No special memory attributes after KVM_RUN. */
MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

/* Stop the vcpu on internal emulation errors. */
bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    return true;
}
950
/*
 * Handle a KVM_EXIT_RISCV_SBI exit. Only the legacy (0.1) console
 * putchar/getchar calls are implemented, forwarded to the first serial
 * backend; anything else is logged as unimplemented and returns -1.
 *
 * NOTE(review): serial_hd(0) is dereferenced without a NULL check —
 * presumably a serial device is always present on machines using this
 * path; confirm before relying on it with '-serial none'.
 */
static int kvm_riscv_handle_sbi(CPUState *cs, struct kvm_run *run)
{
    int ret = 0;
    unsigned char ch;
    switch (run->riscv_sbi.extension_id) {
    case SBI_EXT_0_1_CONSOLE_PUTCHAR:
        ch = run->riscv_sbi.args[0];
        qemu_chr_fe_write(serial_hd(0)->be, &ch, sizeof(ch));
        break;
    case SBI_EXT_0_1_CONSOLE_GETCHAR:
        ret = qemu_chr_fe_read_all(serial_hd(0)->be, &ch, sizeof(ch));
        if (ret == sizeof(ch)) {
            run->riscv_sbi.ret[0] = ch;
        } else {
            /* No character available: legacy SBI reports -1 to the guest. */
            run->riscv_sbi.ret[0] = -1;
        }
        /* The read result was consumed above; the SBI exit itself succeeded. */
        ret = 0;
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s: un-handled SBI EXIT, specific reasons is %lu\n",
                      __func__, run->riscv_sbi.extension_id);
        ret = -1;
        break;
    }
    return ret;
}
978
/*
 * Top-level KVM exit dispatcher: only SBI exits are handled; all other
 * exit reasons are logged as unimplemented and fail with -1.
 */
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    int ret = 0;
    switch (run->exit_reason) {
    case KVM_EXIT_RISCV_SBI:
        ret = kvm_riscv_handle_sbi(cs, run);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n",
                      __func__, run->exit_reason);
        ret = -1;
        break;
    }
    return ret;
}
994
/*
 * Reset a vcpu for direct kernel boot: pc at the kernel entry, a0 = hart
 * id and a1 = FDT address (the standard RISC-V Linux boot protocol
 * register convention), and a cleared satp (bare translation).
 */
void kvm_riscv_reset_vcpu(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;

    if (!kvm_enabled()) {
        return;
    }
    env->pc = cpu->env.kernel_addr;
    env->gpr[10] = kvm_arch_vcpu_id(CPU(cpu));  /* a0: hart id */
    env->gpr[11] = cpu->env.fdt_addr;           /* a1: device tree address */
    env->satp = 0;
}
1007
/*
 * Raise or lower the supervisor external interrupt (IRQ_S_EXT) on a
 * vcpu via KVM_INTERRUPT. Only IRQ_S_EXT is supported; any other irq
 * number, or an ioctl failure, aborts.
 */
void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level)
{
    int ret;
    unsigned virq = level ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET;

    if (irq != IRQ_S_EXT) {
        perror("kvm riscv set irq != IRQ_S_EXT\n");
        abort();
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
    if (ret < 0) {
        perror("Set irq failed");
        abort();
    }
}
1024
/* KVM RISC-V vcpus can be reset by QEMU. */
bool kvm_arch_cpu_check_are_resettable(void)
{
    return true;
}

/* No accelerator-class properties to register. */
void kvm_arch_accel_class_init(ObjectClass *oc)
{
}
1033