1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#include "qemu/osdep.h"
18#include <dirent.h>
19#include <sys/ioctl.h>
20#include <sys/vfs.h>
21
22#include <linux/kvm.h>
23
24#include "qapi/error.h"
25#include "qemu/error-report.h"
26#include "cpu.h"
27#include "cpu-models.h"
28#include "qemu/timer.h"
29#include "sysemu/hw_accel.h"
30#include "kvm_ppc.h"
31#include "sysemu/cpus.h"
32#include "sysemu/device_tree.h"
33#include "mmu-hash64.h"
34
35#include "hw/sysbus.h"
36#include "hw/ppc/spapr.h"
37#include "hw/ppc/spapr_cpu_core.h"
38#include "hw/hw.h"
39#include "hw/ppc/ppc.h"
40#include "migration/qemu-file-types.h"
41#include "sysemu/watchdog.h"
42#include "trace.h"
43#include "exec/gdbstub.h"
44#include "exec/memattrs.h"
45#include "exec/ram_addr.h"
46#include "sysemu/hostmem.h"
47#include "qemu/cutils.h"
48#include "qemu/main-loop.h"
49#include "qemu/mmap-alloc.h"
50#include "elf.h"
51#include "sysemu/kvm_int.h"
52
53#define PROC_DEVTREE_CPU "/proc/device-tree/cpus/"
54
55#define DEBUG_RETURN_GUEST 0
56#define DEBUG_RETURN_GDB 1
57
58const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
59 KVM_CAP_LAST_INFO
60};
61
62static int cap_interrupt_unset;
63static int cap_segstate;
64static int cap_booke_sregs;
65static int cap_ppc_smt;
66static int cap_ppc_smt_possible;
67static int cap_spapr_tce;
68static int cap_spapr_tce_64;
69static int cap_spapr_multitce;
70static int cap_spapr_vfio;
71static int cap_hior;
72static int cap_one_reg;
73static int cap_epr;
74static int cap_ppc_watchdog;
75static int cap_papr;
76static int cap_htab_fd;
77static int cap_fixup_hcalls;
78static int cap_htm;
79static int cap_mmu_radix;
80static int cap_mmu_hash_v3;
81static int cap_xive;
82static int cap_resize_hpt;
83static int cap_ppc_pvr_compat;
84static int cap_ppc_safe_cache;
85static int cap_ppc_safe_bounds_check;
86static int cap_ppc_safe_indirect_branch;
87static int cap_ppc_count_cache_flush_assist;
88static int cap_ppc_nested_kvm_hv;
89static int cap_large_decr;
90static int cap_fwnmi;
91static int cap_rpt_invalidate;
92
93static uint32_t debug_inst_opcode;
94
95
96
97
98
99
100
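/*
 * Sketch of the intent here: heuristically detect the PR (problem-state)
 * flavour of KVM as opposed to HV. The presence of KVM_CAP_PPC_GET_PVINFO
 * is used as the distinguishing feature, since only KVM-PR exposes it.
 */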
101static bool kvmppc_is_pr(KVMState *ks)
102{
103
104 return kvm_vm_check_extension(ks, KVM_CAP_PPC_GET_PVINFO) != 0;
105}
106
107static int kvm_ppc_register_host_cpu_type(void);
108static void kvmppc_get_cpu_characteristics(KVMState *s);
109static int kvmppc_get_dec_bits(void);
110
111int kvm_arch_init(MachineState *ms, KVMState *s)
112{
113 cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
114 cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
115 cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
116 cap_ppc_smt_possible = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT_POSSIBLE);
117 cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
118 cap_spapr_tce_64 = kvm_check_extension(s, KVM_CAP_SPAPR_TCE_64);
119 cap_spapr_multitce = kvm_check_extension(s, KVM_CAP_SPAPR_MULTITCE);
120 cap_spapr_vfio = kvm_vm_check_extension(s, KVM_CAP_SPAPR_TCE_VFIO);
121 cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
122 cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
123 cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
124 cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG);
125
126
127
128
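    /*
     * Note: cap_papr is not probed here; it is only set once
     * KVM_CAP_PPC_PAPR has actually been enabled on a vCPU
     * (see kvmppc_set_papr() further down).
     */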
129 cap_htab_fd = kvm_vm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
130 cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);
131 cap_ppc_smt = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT);
132 cap_htm = kvm_vm_check_extension(s, KVM_CAP_PPC_HTM);
133 cap_mmu_radix = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_RADIX);
134 cap_mmu_hash_v3 = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_HASH_V3);
135 cap_xive = kvm_vm_check_extension(s, KVM_CAP_PPC_IRQ_XIVE);
136 cap_resize_hpt = kvm_vm_check_extension(s, KVM_CAP_SPAPR_RESIZE_HPT);
137 kvmppc_get_cpu_characteristics(s);
138 cap_ppc_nested_kvm_hv = kvm_vm_check_extension(s, KVM_CAP_PPC_NESTED_HV);
139 cap_large_decr = kvmppc_get_dec_bits();
140 cap_fwnmi = kvm_vm_check_extension(s, KVM_CAP_PPC_FWNMI);
141
142
143
144
145
146
147
148 cap_ppc_pvr_compat = false;
149
150 if (!kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL)) {
151 error_report("KVM: Host kernel doesn't have level irq capability");
152 exit(1);
153 }
154
155 cap_rpt_invalidate = kvm_vm_check_extension(s, KVM_CAP_PPC_RPT_INVALIDATE);
156 kvm_ppc_register_host_cpu_type();
157
158 return 0;
159}
160
161int kvm_arch_irqchip_create(KVMState *s)
162{
163 return 0;
164}
165
166static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
167{
168 CPUPPCState *cenv = &cpu->env;
169 CPUState *cs = CPU(cpu);
170 struct kvm_sregs sregs;
171 int ret;
172
173 if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
174
175
176
177
178
179
180
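        /*
         * On BookE there is no SREGS-based PVR to synchronise; the vCPU
         * simply runs with the host PVR, so there is nothing to do here.
         */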
181 return 0;
182 } else {
183 if (!cap_segstate) {
184 fprintf(stderr, "kvm error: missing PVR setting capability\n");
185 return -ENOSYS;
186 }
187 }
188
189 ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
190 if (ret) {
191 return ret;
192 }
193
194 sregs.pvr = cenv->spr[SPR_PVR];
195 return kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
196}
197
198
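/*
 * Hand QEMU's BookE 2.06 (e500 MAS-style) TLB array over to KVM via
 * KVM_CAP_SW_TLB, so that the kernel and QEMU share a single
 * software-managed TLB instead of copying entries back and forth.
 */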
199static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
200{
201 CPUPPCState *env = &cpu->env;
202 CPUState *cs = CPU(cpu);
203 struct kvm_book3e_206_tlb_params params = {};
204 struct kvm_config_tlb cfg = {};
205 unsigned int entries = 0;
206 int ret, i;
207
208 if (!kvm_enabled() ||
209 !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) {
210 return 0;
211 }
212
213 assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);
214
215 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
216 params.tlb_sizes[i] = booke206_tlb_size(env, i);
217 params.tlb_ways[i] = booke206_tlb_ways(env, i);
218 entries += params.tlb_sizes[i];
219 }
220
221 assert(entries == env->nb_tlb);
222 assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));
223
224 env->tlb_dirty = true;
225
226 cfg.array = (uintptr_t)env->tlb.tlbm;
227 cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
228 cfg.params = (uintptr_t)&params;
229 cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;
230
231 ret = kvm_vcpu_enable_cap(cs, KVM_CAP_SW_TLB, 0, (uintptr_t)&cfg);
232 if (ret < 0) {
233 fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
234 __func__, strerror(-ret));
235 return ret;
236 }
237
238 env->kvm_sw_tlb = true;
239 return 0;
240}
241
242
243#if defined(TARGET_PPC64)
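/*
 * Query the host's hash-MMU geometry (segment/page-size encodings, SLB
 * size, flags) via KVM_PPC_GET_SMMU_INFO; sets an error if the capability
 * is not available or the ioctl fails.
 */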
244static void kvm_get_smmu_info(struct kvm_ppc_smmu_info *info, Error **errp)
245{
246 int ret;
247
248 assert(kvm_state != NULL);
249
250 if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
251 error_setg(errp, "KVM doesn't expose the MMU features it supports");
252 error_append_hint(errp, "Consider switching to a newer KVM\n");
253 return;
254 }
255
256 ret = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_SMMU_INFO, info);
257 if (ret == 0) {
258 return;
259 }
260
261 error_setg_errno(errp, -ret,
262 "KVM failed to provide the MMU features it supports");
263}
264
265struct ppc_radix_page_info *kvm_get_radix_page_info(void)
266{
267 KVMState *s = KVM_STATE(current_accel());
268 struct ppc_radix_page_info *radix_page_info;
269 struct kvm_ppc_rmmu_info rmmu_info = { };
270 int i;
271
272 if (!kvm_check_extension(s, KVM_CAP_PPC_MMU_RADIX)) {
273 return NULL;
274 }
275 if (kvm_vm_ioctl(s, KVM_PPC_GET_RMMU_INFO, &rmmu_info)) {
276 return NULL;
277 }
278 radix_page_info = g_malloc0(sizeof(*radix_page_info));
279 radix_page_info->count = 0;
280 for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
281 if (rmmu_info.ap_encodings[i]) {
282 radix_page_info->entries[i] = rmmu_info.ap_encodings[i];
283 radix_page_info->count++;
284 }
285 }
286 return radix_page_info;
287}
288
289target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
290 bool radix, bool gtse,
291 uint64_t proc_tbl)
292{
293 CPUState *cs = CPU(cpu);
294 int ret;
295 uint64_t flags = 0;
296 struct kvm_ppc_mmuv3_cfg cfg = {
297 .process_table = proc_tbl,
298 };
299
300 if (radix) {
301 flags |= KVM_PPC_MMUV3_RADIX;
302 }
303 if (gtse) {
304 flags |= KVM_PPC_MMUV3_GTSE;
305 }
306 cfg.flags = flags;
307 ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_CONFIGURE_V3_MMU, &cfg);
308 switch (ret) {
309 case 0:
310 return H_SUCCESS;
311 case -EINVAL:
312 return H_PARAMETER;
313 case -ENODEV:
314 return H_NOT_AVAILABLE;
315 default:
316 return H_HARDWARE;
317 }
318}
319
320bool kvmppc_hpt_needs_host_contiguous_pages(void)
321{
322 static struct kvm_ppc_smmu_info smmu_info;
323
324 if (!kvm_enabled()) {
325 return false;
326 }
327
328 kvm_get_smmu_info(&smmu_info, &error_fatal);
329 return !!(smmu_info.flags & KVM_PPC_PAGE_SIZES_REAL);
330}
331
332void kvm_check_mmu(PowerPCCPU *cpu, Error **errp)
333{
334 struct kvm_ppc_smmu_info smmu_info;
335 int iq, ik, jq, jk;
336 Error *local_err = NULL;
337
338
339 if (!cpu->hash64_opts || !kvm_enabled()) {
340 return;
341 }
342
343 kvm_get_smmu_info(&smmu_info, &local_err);
344 if (local_err) {
345 error_propagate(errp, local_err);
346 return;
347 }
348
349 if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)
350 && !(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
351 error_setg(errp,
352 "KVM does not support 1TiB segments which guest expects");
353 return;
354 }
355
356 if (smmu_info.slb_size < cpu->hash64_opts->slb_size) {
357 error_setg(errp, "KVM only supports %u SLB entries, but guest needs %u",
358 smmu_info.slb_size, cpu->hash64_opts->slb_size);
359 return;
360 }
361
362
363
364
365
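    /*
     * Verify that every segment/page-size combination the guest CPU model
     * advertises is also supported by the host MMU, with identical SLB and
     * PTE encodings.
     */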
366 for (iq = 0; iq < ARRAY_SIZE(cpu->hash64_opts->sps); iq++) {
367 PPCHash64SegmentPageSizes *qsps = &cpu->hash64_opts->sps[iq];
368 struct kvm_ppc_one_seg_page_size *ksps;
369
370 for (ik = 0; ik < ARRAY_SIZE(smmu_info.sps); ik++) {
371 if (qsps->page_shift == smmu_info.sps[ik].page_shift) {
372 break;
373 }
374 }
375 if (ik >= ARRAY_SIZE(smmu_info.sps)) {
376 error_setg(errp, "KVM doesn't support base page shift %u",
377 qsps->page_shift);
378 return;
379 }
380
381 ksps = &smmu_info.sps[ik];
382 if (ksps->slb_enc != qsps->slb_enc) {
383 error_setg(errp,
384"KVM uses SLB encoding 0x%x for page shift %u, but guest expects 0x%x",
385 ksps->slb_enc, ksps->page_shift, qsps->slb_enc);
386 return;
387 }
388
389 for (jq = 0; jq < ARRAY_SIZE(qsps->enc); jq++) {
390 for (jk = 0; jk < ARRAY_SIZE(ksps->enc); jk++) {
391 if (qsps->enc[jq].page_shift == ksps->enc[jk].page_shift) {
392 break;
393 }
394 }
395
396 if (jk >= ARRAY_SIZE(ksps->enc)) {
397 error_setg(errp, "KVM doesn't support page shift %u/%u",
398 qsps->enc[jq].page_shift, qsps->page_shift);
399 return;
400 }
401 if (qsps->enc[jq].pte_enc != ksps->enc[jk].pte_enc) {
402 error_setg(errp,
403"KVM uses PTE encoding 0x%x for page shift %u/%u, but guest expects 0x%x",
404 ksps->enc[jk].pte_enc, qsps->enc[jq].page_shift,
405 qsps->page_shift, qsps->enc[jq].pte_enc);
406 return;
407 }
408 }
409 }
410
411 if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) {
412
413
414
415
416
417
418
419
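        /*
         * Cache-inhibited large pages (64 kiB) can only be supplied to the
         * guest when the host itself runs with a 64 kiB (or larger) base
         * page size.
         */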
420 if (qemu_real_host_page_size() < 0x10000) {
421 error_setg(errp,
422 "KVM can't supply 64kiB CI pages, which guest expects");
423 }
424 }
425}
426#endif
427
428unsigned long kvm_arch_vcpu_id(CPUState *cpu)
429{
430 return POWERPC_CPU(cpu)->vcpu_id;
431}
432
433
434
435
436
437#define MAX_HW_BKPTS 4
438
439static struct HWBreakpoint {
440 target_ulong addr;
441 int type;
442} hw_debug_points[MAX_HW_BKPTS];
443
444static CPUWatchpoint hw_watchpoint;
445
446
447static int max_hw_breakpoint;
448static int max_hw_watchpoint;
449static int nb_hw_breakpoint;
450static int nb_hw_watchpoint;
451
452static void kvmppc_hw_debug_points_init(CPUPPCState *cenv)
453{
454 if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
455 max_hw_breakpoint = 2;
456 max_hw_watchpoint = 2;
457 }
458
459 if ((max_hw_breakpoint + max_hw_watchpoint) > MAX_HW_BKPTS) {
460 fprintf(stderr, "Error initializing h/w breakpoints\n");
461 return;
462 }
463}
464
465int kvm_arch_init_vcpu(CPUState *cs)
466{
467 PowerPCCPU *cpu = POWERPC_CPU(cs);
468 CPUPPCState *cenv = &cpu->env;
469 int ret;
470
471
472 ret = kvm_arch_sync_sregs(cpu);
473 if (ret) {
474 if (ret == -EINVAL) {
475 error_report("Register sync failed... If you're using kvm-hv.ko,"
476 " only \"-cpu host\" is possible");
477 }
478 return ret;
479 }
480
481 switch (cenv->mmu_model) {
482 case POWERPC_MMU_BOOKE206:
483
484 ret = kvm_booke206_tlb_init(cpu);
485 break;
486 case POWERPC_MMU_2_07:
487 if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) {
488
489
490
491
492
493
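            /*
             * KVM-HV on POWER8 provides transactional memory even when it
             * predates the KVM_CAP_PPC_HTM extension, so treat HTM as
             * available whenever the host advertises it via AT_HWCAP2.
             */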
494 if (qemu_getauxval(AT_HWCAP2) & PPC_FEATURE2_HAS_HTM) {
495 cap_htm = true;
496 }
497 }
498 break;
499 default:
500 break;
501 }
502
503 kvm_get_one_reg(cs, KVM_REG_PPC_DEBUG_INST, &debug_inst_opcode);
504 kvmppc_hw_debug_points_init(cenv);
505
506 return ret;
507}
508
509int kvm_arch_destroy_vcpu(CPUState *cs)
510{
511 return 0;
512}
513
514static void kvm_sw_tlb_put(PowerPCCPU *cpu)
515{
516 CPUPPCState *env = &cpu->env;
517 CPUState *cs = CPU(cpu);
518 struct kvm_dirty_tlb dirty_tlb;
519 unsigned char *bitmap;
520 int ret;
521
522 if (!env->kvm_sw_tlb) {
523 return;
524 }
525
526 bitmap = g_malloc((env->nb_tlb + 7) / 8);
527 memset(bitmap, 0xFF, (env->nb_tlb + 7) / 8);
528
529 dirty_tlb.bitmap = (uintptr_t)bitmap;
530 dirty_tlb.num_dirty = env->nb_tlb;
531
532 ret = kvm_vcpu_ioctl(cs, KVM_DIRTY_TLB, &dirty_tlb);
533 if (ret) {
534 fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
535 __func__, strerror(-ret));
536 }
537
538 g_free(bitmap);
539}
540
541static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr)
542{
543 PowerPCCPU *cpu = POWERPC_CPU(cs);
544 CPUPPCState *env = &cpu->env;
545
546 union {
547 uint32_t u32;
548 uint64_t u64;
549 } val = { };
550 struct kvm_one_reg reg = {
551 .id = id,
552 .addr = (uintptr_t) &val,
553 };
554 int ret;
555
556 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
557 if (ret != 0) {
558 trace_kvm_failed_spr_get(spr, strerror(errno));
559 } else {
560 switch (id & KVM_REG_SIZE_MASK) {
561 case KVM_REG_SIZE_U32:
562 env->spr[spr] = val.u32;
563 break;
564
565 case KVM_REG_SIZE_U64:
566 env->spr[spr] = val.u64;
567 break;
568
569 default:
570
571 abort();
572 }
573 }
574}
575
576static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr)
577{
578 PowerPCCPU *cpu = POWERPC_CPU(cs);
579 CPUPPCState *env = &cpu->env;
580 union {
581 uint32_t u32;
582 uint64_t u64;
583 } val;
584 struct kvm_one_reg reg = {
585 .id = id,
586 .addr = (uintptr_t) &val,
587 };
588 int ret;
589
590 switch (id & KVM_REG_SIZE_MASK) {
591 case KVM_REG_SIZE_U32:
592 val.u32 = env->spr[spr];
593 break;
594
595 case KVM_REG_SIZE_U64:
596 val.u64 = env->spr[spr];
597 break;
598
599 default:
600
601 abort();
602 }
603
604 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
605 if (ret != 0) {
606 trace_kvm_failed_spr_set(spr, strerror(errno));
607 }
608}
609
610static int kvm_put_fp(CPUState *cs)
611{
612 PowerPCCPU *cpu = POWERPC_CPU(cs);
613 CPUPPCState *env = &cpu->env;
614 struct kvm_one_reg reg;
615 int i;
616 int ret;
617
618 if (env->insns_flags & PPC_FLOAT) {
619 uint64_t fpscr = env->fpscr;
620 bool vsx = !!(env->insns_flags2 & PPC2_VSX);
621
622 reg.id = KVM_REG_PPC_FPSCR;
623 reg.addr = (uintptr_t)&fpscr;
624 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
625 if (ret < 0) {
626 trace_kvm_failed_fpscr_set(strerror(errno));
627 return ret;
628 }
629
630 for (i = 0; i < 32; i++) {
631 uint64_t vsr[2];
632 uint64_t *fpr = cpu_fpr_ptr(&cpu->env, i);
633 uint64_t *vsrl = cpu_vsrl_ptr(&cpu->env, i);
634
635#if HOST_BIG_ENDIAN
636 vsr[0] = float64_val(*fpr);
637 vsr[1] = *vsrl;
638#else
639 vsr[0] = *vsrl;
640 vsr[1] = float64_val(*fpr);
641#endif
642 reg.addr = (uintptr_t) &vsr;
643 reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
644
645 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
646 if (ret < 0) {
647 trace_kvm_failed_fp_set(vsx ? "VSR" : "FPR", i,
648 strerror(errno));
649 return ret;
650 }
651 }
652 }
653
654 if (env->insns_flags & PPC_ALTIVEC) {
655 reg.id = KVM_REG_PPC_VSCR;
656 reg.addr = (uintptr_t)&env->vscr;
657 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
658 if (ret < 0) {
659 trace_kvm_failed_vscr_set(strerror(errno));
660 return ret;
661 }
662
663 for (i = 0; i < 32; i++) {
664 reg.id = KVM_REG_PPC_VR(i);
665 reg.addr = (uintptr_t)cpu_avr_ptr(env, i);
666 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
667 if (ret < 0) {
668 trace_kvm_failed_vr_set(i, strerror(errno));
669 return ret;
670 }
671 }
672 }
673
674 return 0;
675}
676
677static int kvm_get_fp(CPUState *cs)
678{
679 PowerPCCPU *cpu = POWERPC_CPU(cs);
680 CPUPPCState *env = &cpu->env;
681 struct kvm_one_reg reg;
682 int i;
683 int ret;
684
685 if (env->insns_flags & PPC_FLOAT) {
686 uint64_t fpscr;
687 bool vsx = !!(env->insns_flags2 & PPC2_VSX);
688
689 reg.id = KVM_REG_PPC_FPSCR;
690 reg.addr = (uintptr_t)&fpscr;
691 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
692 if (ret < 0) {
693 trace_kvm_failed_fpscr_get(strerror(errno));
694 return ret;
695 } else {
696 env->fpscr = fpscr;
697 }
698
699 for (i = 0; i < 32; i++) {
700 uint64_t vsr[2];
701 uint64_t *fpr = cpu_fpr_ptr(&cpu->env, i);
702 uint64_t *vsrl = cpu_vsrl_ptr(&cpu->env, i);
703
704 reg.addr = (uintptr_t) &vsr;
705 reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
706
707 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
708 if (ret < 0) {
709 trace_kvm_failed_fp_get(vsx ? "VSR" : "FPR", i,
710 strerror(errno));
711 return ret;
712 } else {
713#if HOST_BIG_ENDIAN
714 *fpr = vsr[0];
715 if (vsx) {
716 *vsrl = vsr[1];
717 }
718#else
719 *fpr = vsr[1];
720 if (vsx) {
721 *vsrl = vsr[0];
722 }
723#endif
724 }
725 }
726 }
727
728 if (env->insns_flags & PPC_ALTIVEC) {
729 reg.id = KVM_REG_PPC_VSCR;
730 reg.addr = (uintptr_t)&env->vscr;
731 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
732 if (ret < 0) {
733 trace_kvm_failed_vscr_get(strerror(errno));
734 return ret;
735 }
736
737 for (i = 0; i < 32; i++) {
738 reg.id = KVM_REG_PPC_VR(i);
739 reg.addr = (uintptr_t)cpu_avr_ptr(env, i);
740 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
741 if (ret < 0) {
742 trace_kvm_failed_vr_get(i, strerror(errno));
743 return ret;
744 }
745 }
746 }
747
748 return 0;
749}
750
751#if defined(TARGET_PPC64)
752static int kvm_get_vpa(CPUState *cs)
753{
754 PowerPCCPU *cpu = POWERPC_CPU(cs);
755 SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
756 struct kvm_one_reg reg;
757 int ret;
758
759 reg.id = KVM_REG_PPC_VPA_ADDR;
760 reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
761 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
762 if (ret < 0) {
763 trace_kvm_failed_vpa_addr_get(strerror(errno));
764 return ret;
765 }
766
767 assert((uintptr_t)&spapr_cpu->slb_shadow_size
768 == ((uintptr_t)&spapr_cpu->slb_shadow_addr + 8));
769 reg.id = KVM_REG_PPC_VPA_SLB;
770 reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr;
771 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
772 if (ret < 0) {
773 trace_kvm_failed_slb_get(strerror(errno));
774 return ret;
775 }
776
777 assert((uintptr_t)&spapr_cpu->dtl_size
778 == ((uintptr_t)&spapr_cpu->dtl_addr + 8));
779 reg.id = KVM_REG_PPC_VPA_DTL;
780 reg.addr = (uintptr_t)&spapr_cpu->dtl_addr;
781 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
782 if (ret < 0) {
783 trace_kvm_failed_dtl_get(strerror(errno));
784 return ret;
785 }
786
787 return 0;
788}
789
790static int kvm_put_vpa(CPUState *cs)
791{
792 PowerPCCPU *cpu = POWERPC_CPU(cs);
793 SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
794 struct kvm_one_reg reg;
795 int ret;
796
797
798
799
800
801
802
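    /*
     * The SLB shadow and dispatch trace log can only be registered while a
     * master VPA is registered. So when restoring state, register the VPA
     * first if one exists; otherwise deregister the others before finally
     * deregistering the master VPA.
     */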
803 assert(spapr_cpu->vpa_addr
804 || !(spapr_cpu->slb_shadow_addr || spapr_cpu->dtl_addr));
805
806 if (spapr_cpu->vpa_addr) {
807 reg.id = KVM_REG_PPC_VPA_ADDR;
808 reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
809 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
810 if (ret < 0) {
811 trace_kvm_failed_vpa_addr_set(strerror(errno));
812 return ret;
813 }
814 }
815
816 assert((uintptr_t)&spapr_cpu->slb_shadow_size
817 == ((uintptr_t)&spapr_cpu->slb_shadow_addr + 8));
818 reg.id = KVM_REG_PPC_VPA_SLB;
819 reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr;
820 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
821 if (ret < 0) {
822 trace_kvm_failed_slb_set(strerror(errno));
823 return ret;
824 }
825
826 assert((uintptr_t)&spapr_cpu->dtl_size
827 == ((uintptr_t)&spapr_cpu->dtl_addr + 8));
828 reg.id = KVM_REG_PPC_VPA_DTL;
829 reg.addr = (uintptr_t)&spapr_cpu->dtl_addr;
830 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
831 if (ret < 0) {
832 trace_kvm_failed_dtl_set(strerror(errno));
833 return ret;
834 }
835
836 if (!spapr_cpu->vpa_addr) {
837 reg.id = KVM_REG_PPC_VPA_ADDR;
838 reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
839 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
840 if (ret < 0) {
841 trace_kvm_failed_null_vpa_addr_set(strerror(errno));
842 return ret;
843 }
844 }
845
846 return 0;
847}
848#endif
849
850int kvmppc_put_books_sregs(PowerPCCPU *cpu)
851{
852 CPUPPCState *env = &cpu->env;
853 struct kvm_sregs sregs = { };
854 int i;
855
856 sregs.pvr = env->spr[SPR_PVR];
857
858 if (cpu->vhyp) {
859 PPCVirtualHypervisorClass *vhc =
860 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
861 sregs.u.s.sdr1 = vhc->encode_hpt_for_kvm_pr(cpu->vhyp);
862 } else {
863 sregs.u.s.sdr1 = env->spr[SPR_SDR1];
864 }
865
866
867#ifdef TARGET_PPC64
868 for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
869 sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
870 if (env->slb[i].esid & SLB_ESID_V) {
871 sregs.u.s.ppc64.slb[i].slbe |= i;
872 }
873 sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
874 }
875#endif
876
877
878 for (i = 0; i < 16; i++) {
879 sregs.u.s.ppc32.sr[i] = env->sr[i];
880 }
881
882
883 for (i = 0; i < 8; i++) {
884
885 sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
886 | env->DBAT[1][i];
887 sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
888 | env->IBAT[1][i];
889 }
890
891 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
892}
893
894int kvm_arch_put_registers(CPUState *cs, int level)
895{
896 PowerPCCPU *cpu = POWERPC_CPU(cs);
897 CPUPPCState *env = &cpu->env;
898 struct kvm_regs regs;
899 int ret;
900 int i;
901
902 ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
903 if (ret < 0) {
904 return ret;
905 }
906
907 regs.ctr = env->ctr;
908 regs.lr = env->lr;
909 regs.xer = cpu_read_xer(env);
910 regs.msr = env->msr;
911 regs.pc = env->nip;
912
913 regs.srr0 = env->spr[SPR_SRR0];
914 regs.srr1 = env->spr[SPR_SRR1];
915
916 regs.sprg0 = env->spr[SPR_SPRG0];
917 regs.sprg1 = env->spr[SPR_SPRG1];
918 regs.sprg2 = env->spr[SPR_SPRG2];
919 regs.sprg3 = env->spr[SPR_SPRG3];
920 regs.sprg4 = env->spr[SPR_SPRG4];
921 regs.sprg5 = env->spr[SPR_SPRG5];
922 regs.sprg6 = env->spr[SPR_SPRG6];
923 regs.sprg7 = env->spr[SPR_SPRG7];
924
925 regs.pid = env->spr[SPR_BOOKE_PID];
926
927 for (i = 0; i < 32; i++) {
928 regs.gpr[i] = env->gpr[i];
929 }
930
931 regs.cr = 0;
932 for (i = 0; i < 8; i++) {
933 regs.cr |= (env->crf[i] & 15) << (4 * (7 - i));
934 }
935
936 ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
937 if (ret < 0) {
938 return ret;
939 }
940
941 kvm_put_fp(cs);
942
943 if (env->tlb_dirty) {
944 kvm_sw_tlb_put(cpu);
945 env->tlb_dirty = false;
946 }
947
948 if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
949 ret = kvmppc_put_books_sregs(cpu);
950 if (ret < 0) {
951 return ret;
952 }
953 }
954
955 if (cap_hior && (level >= KVM_PUT_RESET_STATE)) {
956 kvm_put_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
957 }
958
959 if (cap_one_reg) {
960 int i;
961
962
963
964
965
966
967
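        /*
         * Errors from the individual ONE_REG writes are deliberately
         * ignored: kernels that implement the ONE_REG interface but lack a
         * particular register will usually still run fine, at least until
         * migration is attempted.
         */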
968 for (i = 0; i < 1024; i++) {
969 uint64_t id = env->spr_cb[i].one_reg_id;
970
971 if (id != 0) {
972 kvm_put_one_spr(cs, id, i);
973 }
974 }
975
976#ifdef TARGET_PPC64
977 if (FIELD_EX64(env->msr, MSR, TS)) {
978 for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
979 kvm_set_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
980 }
981 for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
982 kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
983 }
984 kvm_set_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
985 kvm_set_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
986 kvm_set_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
987 kvm_set_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
988 kvm_set_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
989 kvm_set_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
990 kvm_set_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
991 kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
992 kvm_set_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
993 kvm_set_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
994 }
995
996 if (cap_papr) {
997 if (kvm_put_vpa(cs) < 0) {
998 trace_kvm_failed_put_vpa();
999 }
1000 }
1001
1002 kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
1003
1004 if (level > KVM_PUT_RUNTIME_STATE) {
1005 kvm_put_one_spr(cs, KVM_REG_PPC_DPDES, SPR_DPDES);
1006 }
1007#endif
1008 }
1009
1010 return ret;
1011}
1012
1013static void kvm_sync_excp(CPUPPCState *env, int vector, int ivor)
1014{
1015 env->excp_vectors[vector] = env->spr[ivor] + env->spr[SPR_BOOKE_IVPR];
1016}
1017
1018static int kvmppc_get_booke_sregs(PowerPCCPU *cpu)
1019{
1020 CPUPPCState *env = &cpu->env;
1021 struct kvm_sregs sregs;
1022 int ret;
1023
1024 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
1025 if (ret < 0) {
1026 return ret;
1027 }
1028
1029 if (sregs.u.e.features & KVM_SREGS_E_BASE) {
1030 env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
1031 env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
1032 env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
1033 env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
1034 env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
1035 env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
1036 env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
1037 env->spr[SPR_DECR] = sregs.u.e.dec;
1038 env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
1039 env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
1040 env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
1041 }
1042
1043 if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
1044 env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
1045 env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
1046 env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
1047 env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
1048 env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
1049 }
1050
1051 if (sregs.u.e.features & KVM_SREGS_E_64) {
1052 env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
1053 }
1054
1055 if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
1056 env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
1057 }
1058
1059 if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
1060 env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
1061 kvm_sync_excp(env, POWERPC_EXCP_CRITICAL, SPR_BOOKE_IVOR0);
1062 env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
1063 kvm_sync_excp(env, POWERPC_EXCP_MCHECK, SPR_BOOKE_IVOR1);
1064 env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
1065 kvm_sync_excp(env, POWERPC_EXCP_DSI, SPR_BOOKE_IVOR2);
1066 env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
1067 kvm_sync_excp(env, POWERPC_EXCP_ISI, SPR_BOOKE_IVOR3);
1068 env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
1069 kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL, SPR_BOOKE_IVOR4);
1070 env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
1071 kvm_sync_excp(env, POWERPC_EXCP_ALIGN, SPR_BOOKE_IVOR5);
1072 env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
1073 kvm_sync_excp(env, POWERPC_EXCP_PROGRAM, SPR_BOOKE_IVOR6);
1074 env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
1075 kvm_sync_excp(env, POWERPC_EXCP_FPU, SPR_BOOKE_IVOR7);
1076 env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
1077 kvm_sync_excp(env, POWERPC_EXCP_SYSCALL, SPR_BOOKE_IVOR8);
1078 env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
1079 kvm_sync_excp(env, POWERPC_EXCP_APU, SPR_BOOKE_IVOR9);
1080 env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
1081 kvm_sync_excp(env, POWERPC_EXCP_DECR, SPR_BOOKE_IVOR10);
1082 env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
1083 kvm_sync_excp(env, POWERPC_EXCP_FIT, SPR_BOOKE_IVOR11);
1084 env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
1085 kvm_sync_excp(env, POWERPC_EXCP_WDT, SPR_BOOKE_IVOR12);
1086 env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
1087 kvm_sync_excp(env, POWERPC_EXCP_DTLB, SPR_BOOKE_IVOR13);
1088 env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
1089 kvm_sync_excp(env, POWERPC_EXCP_ITLB, SPR_BOOKE_IVOR14);
1090 env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];
1091 kvm_sync_excp(env, POWERPC_EXCP_DEBUG, SPR_BOOKE_IVOR15);
1092
1093 if (sregs.u.e.features & KVM_SREGS_E_SPE) {
1094 env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
1095 kvm_sync_excp(env, POWERPC_EXCP_SPEU, SPR_BOOKE_IVOR32);
1096 env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
1097 kvm_sync_excp(env, POWERPC_EXCP_EFPDI, SPR_BOOKE_IVOR33);
1098 env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
1099 kvm_sync_excp(env, POWERPC_EXCP_EFPRI, SPR_BOOKE_IVOR34);
1100 }
1101
1102 if (sregs.u.e.features & KVM_SREGS_E_PM) {
1103 env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
1104 kvm_sync_excp(env, POWERPC_EXCP_EPERFM, SPR_BOOKE_IVOR35);
1105 }
1106
1107 if (sregs.u.e.features & KVM_SREGS_E_PC) {
1108 env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
1109 kvm_sync_excp(env, POWERPC_EXCP_DOORI, SPR_BOOKE_IVOR36);
1110 env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
1111 kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37);
1112 }
1113 }
1114
1115 if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
1116 env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
1117 env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
1118 env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
1119 env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
1120 env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
1121 env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
1122 env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
1123 env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
1124 env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
1125 env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
1126 }
1127
1128 if (sregs.u.e.features & KVM_SREGS_EXP) {
1129 env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
1130 }
1131
1132 if (sregs.u.e.features & KVM_SREGS_E_PD) {
1133 env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
1134 env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
1135 }
1136
1137 if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
1138 env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
1139 env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
1140 env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;
1141
1142 if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
1143 env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
1144 env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
1145 }
1146 }
1147
1148 return 0;
1149}
1150
1151static int kvmppc_get_books_sregs(PowerPCCPU *cpu)
1152{
1153 CPUPPCState *env = &cpu->env;
1154 struct kvm_sregs sregs;
1155 int ret;
1156 int i;
1157
1158 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
1159 if (ret < 0) {
1160 return ret;
1161 }
1162
1163 if (!cpu->vhyp) {
1164 ppc_store_sdr1(env, sregs.u.s.sdr1);
1165 }
1166
1167
1168#ifdef TARGET_PPC64
1169
1170
1171
1172
1173
1174
1175 memset(env->slb, 0, sizeof(env->slb));
1176 for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
1177 target_ulong rb = sregs.u.s.ppc64.slb[i].slbe;
1178 target_ulong rs = sregs.u.s.ppc64.slb[i].slbv;
1179
1180
1181
1182 if (rb & SLB_ESID_V) {
1183 ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs);
1184 }
1185 }
1186#endif
1187
1188
1189 for (i = 0; i < 16; i++) {
1190 env->sr[i] = sregs.u.s.ppc32.sr[i];
1191 }
1192
1193
1194 for (i = 0; i < 8; i++) {
1195 env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
1196 env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
1197 env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
1198 env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
1199 }
1200
1201 return 0;
1202}
1203
1204int kvm_arch_get_registers(CPUState *cs)
1205{
1206 PowerPCCPU *cpu = POWERPC_CPU(cs);
1207 CPUPPCState *env = &cpu->env;
1208 struct kvm_regs regs;
1209 uint32_t cr;
1210 int i, ret;
1211
1212 ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
1213 if (ret < 0) {
1214 return ret;
1215 }
1216
1217 cr = regs.cr;
1218 for (i = 7; i >= 0; i--) {
1219 env->crf[i] = cr & 15;
1220 cr >>= 4;
1221 }
1222
1223 env->ctr = regs.ctr;
1224 env->lr = regs.lr;
1225 cpu_write_xer(env, regs.xer);
1226 env->msr = regs.msr;
1227 env->nip = regs.pc;
1228
1229 env->spr[SPR_SRR0] = regs.srr0;
1230 env->spr[SPR_SRR1] = regs.srr1;
1231
1232 env->spr[SPR_SPRG0] = regs.sprg0;
1233 env->spr[SPR_SPRG1] = regs.sprg1;
1234 env->spr[SPR_SPRG2] = regs.sprg2;
1235 env->spr[SPR_SPRG3] = regs.sprg3;
1236 env->spr[SPR_SPRG4] = regs.sprg4;
1237 env->spr[SPR_SPRG5] = regs.sprg5;
1238 env->spr[SPR_SPRG6] = regs.sprg6;
1239 env->spr[SPR_SPRG7] = regs.sprg7;
1240
1241 env->spr[SPR_BOOKE_PID] = regs.pid;
1242
1243 for (i = 0; i < 32; i++) {
1244 env->gpr[i] = regs.gpr[i];
1245 }
1246
1247 kvm_get_fp(cs);
1248
1249 if (cap_booke_sregs) {
1250 ret = kvmppc_get_booke_sregs(cpu);
1251 if (ret < 0) {
1252 return ret;
1253 }
1254 }
1255
1256 if (cap_segstate) {
1257 ret = kvmppc_get_books_sregs(cpu);
1258 if (ret < 0) {
1259 return ret;
1260 }
1261 }
1262
1263 if (cap_hior) {
1264 kvm_get_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
1265 }
1266
1267 if (cap_one_reg) {
1268 int i;
1269
1270
1271
1272
1273
1274
1275
1276 for (i = 0; i < 1024; i++) {
1277 uint64_t id = env->spr_cb[i].one_reg_id;
1278
1279 if (id != 0) {
1280 kvm_get_one_spr(cs, id, i);
1281 }
1282 }
1283
1284#ifdef TARGET_PPC64
1285 if (FIELD_EX64(env->msr, MSR, TS)) {
1286 for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
1287 kvm_get_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
1288 }
1289 for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
1290 kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
1291 }
1292 kvm_get_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
1293 kvm_get_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
1294 kvm_get_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
1295 kvm_get_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
1296 kvm_get_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
1297 kvm_get_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
1298 kvm_get_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
1299 kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
1300 kvm_get_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
1301 kvm_get_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
1302 }
1303
1304 if (cap_papr) {
1305 if (kvm_get_vpa(cs) < 0) {
1306 trace_kvm_failed_get_vpa();
1307 }
1308 }
1309
1310 kvm_get_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
1311 kvm_get_one_spr(cs, KVM_REG_PPC_DPDES, SPR_DPDES);
1312#endif
1313 }
1314
1315 return 0;
1316}
1317
1318int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
1319{
1320 unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;
1321
1322 if (irq != PPC_INTERRUPT_EXT) {
1323 return 0;
1324 }
1325
1326 if (!kvm_enabled() || !cap_interrupt_unset) {
1327 return 0;
1328 }
1329
1330 kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
1331
1332 return 0;
1333}
1334
1335void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
1336{
1337 return;
1338}
1339
1340MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
1341{
1342 return MEMTXATTRS_UNSPECIFIED;
1343}
1344
1345int kvm_arch_process_async_events(CPUState *cs)
1346{
1347 return cs->halted;
1348}
1349
1350static int kvmppc_handle_halt(PowerPCCPU *cpu)
1351{
1352 CPUState *cs = CPU(cpu);
1353 CPUPPCState *env = &cpu->env;
1354
1355 if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) &&
1356 FIELD_EX64(env->msr, MSR, EE)) {
1357 cs->halted = 1;
1358 cs->exception_index = EXCP_HLT;
1359 }
1360
1361 return 0;
1362}
1363
1364
1365static int kvmppc_handle_dcr_read(CPUPPCState *env,
1366 uint32_t dcrn, uint32_t *data)
1367{
1368 if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0) {
1369 fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn);
1370 }
1371
1372 return 0;
1373}
1374
1375static int kvmppc_handle_dcr_write(CPUPPCState *env,
1376 uint32_t dcrn, uint32_t data)
1377{
1378 if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0) {
1379 fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
1380 }
1381
1382 return 0;
1383}
1384
1385int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
1386{
1387
1388 uint32_t sc = debug_inst_opcode;
1389
1390 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
1391 sizeof(sc), 0) ||
1392 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 1)) {
1393 return -EINVAL;
1394 }
1395
1396 return 0;
1397}
1398
1399int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
1400{
1401 uint32_t sc;
1402
1403 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 0) ||
1404 sc != debug_inst_opcode ||
1405 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
1406 sizeof(sc), 1)) {
1407 return -EINVAL;
1408 }
1409
1410 return 0;
1411}
1412
1413static int find_hw_breakpoint(target_ulong addr, int type)
1414{
1415 int n;
1416
1417 assert((nb_hw_breakpoint + nb_hw_watchpoint)
1418 <= ARRAY_SIZE(hw_debug_points));
1419
1420 for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
1421 if (hw_debug_points[n].addr == addr &&
1422 hw_debug_points[n].type == type) {
1423 return n;
1424 }
1425 }
1426
1427 return -1;
1428}
1429
1430static int find_hw_watchpoint(target_ulong addr, int *flag)
1431{
1432 int n;
1433
1434 n = find_hw_breakpoint(addr, GDB_WATCHPOINT_ACCESS);
1435 if (n >= 0) {
1436 *flag = BP_MEM_ACCESS;
1437 return n;
1438 }
1439
1440 n = find_hw_breakpoint(addr, GDB_WATCHPOINT_WRITE);
1441 if (n >= 0) {
1442 *flag = BP_MEM_WRITE;
1443 return n;
1444 }
1445
1446 n = find_hw_breakpoint(addr, GDB_WATCHPOINT_READ);
1447 if (n >= 0) {
1448 *flag = BP_MEM_READ;
1449 return n;
1450 }
1451
1452 return -1;
1453}
1454
1455int kvm_arch_insert_hw_breakpoint(target_ulong addr,
1456 target_ulong len, int type)
1457{
1458 if ((nb_hw_breakpoint + nb_hw_watchpoint) >= ARRAY_SIZE(hw_debug_points)) {
1459 return -ENOBUFS;
1460 }
1461
1462 hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].addr = addr;
1463 hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].type = type;
1464
1465 switch (type) {
1466 case GDB_BREAKPOINT_HW:
1467 if (nb_hw_breakpoint >= max_hw_breakpoint) {
1468 return -ENOBUFS;
1469 }
1470
1471 if (find_hw_breakpoint(addr, type) >= 0) {
1472 return -EEXIST;
1473 }
1474
1475 nb_hw_breakpoint++;
1476 break;
1477
1478 case GDB_WATCHPOINT_WRITE:
1479 case GDB_WATCHPOINT_READ:
1480 case GDB_WATCHPOINT_ACCESS:
1481 if (nb_hw_watchpoint >= max_hw_watchpoint) {
1482 return -ENOBUFS;
1483 }
1484
1485 if (find_hw_breakpoint(addr, type) >= 0) {
1486 return -EEXIST;
1487 }
1488
1489 nb_hw_watchpoint++;
1490 break;
1491
1492 default:
1493 return -ENOSYS;
1494 }
1495
1496 return 0;
1497}
1498
1499int kvm_arch_remove_hw_breakpoint(target_ulong addr,
1500 target_ulong len, int type)
1501{
1502 int n;
1503
1504 n = find_hw_breakpoint(addr, type);
1505 if (n < 0) {
1506 return -ENOENT;
1507 }
1508
1509 switch (type) {
1510 case GDB_BREAKPOINT_HW:
1511 nb_hw_breakpoint--;
1512 break;
1513
1514 case GDB_WATCHPOINT_WRITE:
1515 case GDB_WATCHPOINT_READ:
1516 case GDB_WATCHPOINT_ACCESS:
1517 nb_hw_watchpoint--;
1518 break;
1519
1520 default:
1521 return -ENOSYS;
1522 }
1523 hw_debug_points[n] = hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint];
1524
1525 return 0;
1526}
1527
1528void kvm_arch_remove_all_hw_breakpoints(void)
1529{
1530 nb_hw_breakpoint = nb_hw_watchpoint = 0;
1531}
1532
1533void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
1534{
1535 int n;
1536
1537
1538 if (kvm_sw_breakpoints_active(cs)) {
1539 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
1540 }
1541
1542 assert((nb_hw_breakpoint + nb_hw_watchpoint)
1543 <= ARRAY_SIZE(hw_debug_points));
1544 assert((nb_hw_breakpoint + nb_hw_watchpoint) <= ARRAY_SIZE(dbg->arch.bp));
1545
1546 if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
1547 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
1548 memset(dbg->arch.bp, 0, sizeof(dbg->arch.bp));
1549 for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
1550 switch (hw_debug_points[n].type) {
1551 case GDB_BREAKPOINT_HW:
1552 dbg->arch.bp[n].type = KVMPPC_DEBUG_BREAKPOINT;
1553 break;
1554 case GDB_WATCHPOINT_WRITE:
1555 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE;
1556 break;
1557 case GDB_WATCHPOINT_READ:
1558 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_READ;
1559 break;
1560 case GDB_WATCHPOINT_ACCESS:
1561 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE |
1562 KVMPPC_DEBUG_WATCH_READ;
1563 break;
1564 default:
1565 cpu_abort(cs, "Unsupported breakpoint type\n");
1566 }
1567 dbg->arch.bp[n].addr = hw_debug_points[n].addr;
1568 }
1569 }
1570}
1571
1572static int kvm_handle_hw_breakpoint(CPUState *cs,
1573 struct kvm_debug_exit_arch *arch_info)
1574{
1575 int handle = DEBUG_RETURN_GUEST;
1576 int n;
1577 int flag = 0;
1578
1579 if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
1580 if (arch_info->status & KVMPPC_DEBUG_BREAKPOINT) {
1581 n = find_hw_breakpoint(arch_info->address, GDB_BREAKPOINT_HW);
1582 if (n >= 0) {
1583 handle = DEBUG_RETURN_GDB;
1584 }
1585 } else if (arch_info->status & (KVMPPC_DEBUG_WATCH_READ |
1586 KVMPPC_DEBUG_WATCH_WRITE)) {
1587 n = find_hw_watchpoint(arch_info->address, &flag);
1588 if (n >= 0) {
1589 handle = DEBUG_RETURN_GDB;
1590 cs->watchpoint_hit = &hw_watchpoint;
1591 hw_watchpoint.vaddr = hw_debug_points[n].addr;
1592 hw_watchpoint.flags = flag;
1593 }
1594 }
1595 }
1596 return handle;
1597}
1598
1599static int kvm_handle_singlestep(void)
1600{
1601 return DEBUG_RETURN_GDB;
1602}
1603
1604static int kvm_handle_sw_breakpoint(void)
1605{
1606 return DEBUG_RETURN_GDB;
1607}
1608
1609static int kvm_handle_debug(PowerPCCPU *cpu, struct kvm_run *run)
1610{
1611 CPUState *cs = CPU(cpu);
1612 CPUPPCState *env = &cpu->env;
1613 struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
1614
1615 if (cs->singlestep_enabled) {
1616 return kvm_handle_singlestep();
1617 }
1618
1619 if (arch_info->status) {
1620 return kvm_handle_hw_breakpoint(cs, arch_info);
1621 }
1622
1623 if (kvm_find_sw_breakpoint(cs, arch_info->address)) {
1624 return kvm_handle_sw_breakpoint();
1625 }
1626
1647
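    /*
     * The debug exit matched none of QEMU's breakpoints or watchpoints, so
     * it was most likely triggered by the guest itself. Reflect it back as
     * an illegal-instruction program interrupt: skip over the offending
     * instruction and deliver POWERPC_EXCP_PROGRAM / POWERPC_EXCP_INVAL.
     */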
1648 cpu_synchronize_state(cs);
1649
1650
1651
1652
1653 env->nip += 4;
1654 cs->exception_index = POWERPC_EXCP_PROGRAM;
1655 env->error_code = POWERPC_EXCP_INVAL;
1656 ppc_cpu_do_interrupt(cs);
1657
1658 return DEBUG_RETURN_GUEST;
1659}
1660
1661int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
1662{
1663 PowerPCCPU *cpu = POWERPC_CPU(cs);
1664 CPUPPCState *env = &cpu->env;
1665 int ret;
1666
1667 qemu_mutex_lock_iothread();
1668
1669 switch (run->exit_reason) {
1670 case KVM_EXIT_DCR:
1671 if (run->dcr.is_write) {
1672 trace_kvm_handle_dcr_write();
1673 ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
1674 } else {
1675 trace_kvm_handle_dcr_read();
1676 ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
1677 }
1678 break;
1679 case KVM_EXIT_HLT:
1680 trace_kvm_handle_halt();
1681 ret = kvmppc_handle_halt(cpu);
1682 break;
1683#if defined(TARGET_PPC64)
1684 case KVM_EXIT_PAPR_HCALL:
1685 trace_kvm_handle_papr_hcall(run->papr_hcall.nr);
1686 run->papr_hcall.ret = spapr_hypercall(cpu,
1687 run->papr_hcall.nr,
1688 run->papr_hcall.args);
1689 ret = 0;
1690 break;
1691#endif
1692 case KVM_EXIT_EPR:
1693 trace_kvm_handle_epr();
1694 run->epr.epr = ldl_phys(cs->as, env->mpic_iack);
1695 ret = 0;
1696 break;
1697 case KVM_EXIT_WATCHDOG:
1698 trace_kvm_handle_watchdog_expiry();
1699 watchdog_perform_action();
1700 ret = 0;
1701 break;
1702
1703 case KVM_EXIT_DEBUG:
1704 trace_kvm_handle_debug_exception();
1705 if (kvm_handle_debug(cpu, run)) {
1706 ret = EXCP_DEBUG;
1707 break;
1708 }
1709
1710 ret = 0;
1711 break;
1712
1713#if defined(TARGET_PPC64)
1714 case KVM_EXIT_NMI:
1715 trace_kvm_handle_nmi_exception();
1716 ret = kvm_handle_nmi(cpu, run);
1717 break;
1718#endif
1719
1720 default:
1721 fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
1722 ret = -1;
1723 break;
1724 }
1725
1726 qemu_mutex_unlock_iothread();
1727 return ret;
1728}
1729
1730int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
1731{
1732 CPUState *cs = CPU(cpu);
1733 uint32_t bits = tsr_bits;
1734 struct kvm_one_reg reg = {
1735 .id = KVM_REG_PPC_OR_TSR,
1736 .addr = (uintptr_t) &bits,
1737 };
1738
1739 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
1740}
1741
1742int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
1743{
1744
1745 CPUState *cs = CPU(cpu);
1746 uint32_t bits = tsr_bits;
1747 struct kvm_one_reg reg = {
1748 .id = KVM_REG_PPC_CLEAR_TSR,
1749 .addr = (uintptr_t) &bits,
1750 };
1751
1752 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
1753}
1754
1755int kvmppc_set_tcr(PowerPCCPU *cpu)
1756{
1757 CPUState *cs = CPU(cpu);
1758 CPUPPCState *env = &cpu->env;
1759 uint32_t tcr = env->spr[SPR_BOOKE_TCR];
1760
1761 struct kvm_one_reg reg = {
1762 .id = KVM_REG_PPC_TCR,
1763 .addr = (uintptr_t) &tcr,
1764 };
1765
1766 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
1767}
1768
1769int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
1770{
1771 CPUState *cs = CPU(cpu);
1772 int ret;
1773
1774 if (!kvm_enabled()) {
1775 return -1;
1776 }
1777
1778 if (!cap_ppc_watchdog) {
1779 fprintf(stderr, "warning: KVM does not support watchdog\n");
1780 return -1;
1781 }
1782
1783 ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_BOOKE_WATCHDOG, 0);
1784 if (ret < 0) {
1785 fprintf(stderr, "%s: couldn't enable KVM_CAP_PPC_BOOKE_WATCHDOG: %s\n",
1786 __func__, strerror(-ret));
1787 return ret;
1788 }
1789
1790 return ret;
1791}
1792
1793static int read_cpuinfo(const char *field, char *value, int len)
1794{
1795 FILE *f;
1796 int ret = -1;
1797 int field_len = strlen(field);
1798 char line[512];
1799
1800 f = fopen("/proc/cpuinfo", "r");
1801 if (!f) {
1802 return -1;
1803 }
1804
1805 do {
1806 if (!fgets(line, sizeof(line), f)) {
1807 break;
1808 }
1809 if (!strncmp(line, field, field_len)) {
1810 pstrcpy(value, len, line);
1811 ret = 0;
1812 break;
1813 }
1814 } while (*line);
1815
1816 fclose(f);
1817
1818 return ret;
1819}
1820
1821static uint32_t kvmppc_get_tbfreq_procfs(void)
1822{
1823 char line[512];
1824 char *ns;
1825 uint32_t tbfreq_fallback = NANOSECONDS_PER_SECOND;
1826 uint32_t tbfreq_procfs;
1827
1828 if (read_cpuinfo("timebase", line, sizeof(line))) {
1829 return tbfreq_fallback;
1830 }
1831
1832 ns = strchr(line, ':');
1833 if (!ns) {
1834 return tbfreq_fallback;
1835 }
1836
1837 tbfreq_procfs = atoi(++ns);
1838
1839
1840 return tbfreq_procfs ? tbfreq_procfs : tbfreq_fallback;
1841}
1842
1843uint32_t kvmppc_get_tbfreq(void)
1844{
1845 static uint32_t cached_tbfreq;
1846
1847 if (!cached_tbfreq) {
1848 cached_tbfreq = kvmppc_get_tbfreq_procfs();
1849 }
1850
1851 return cached_tbfreq;
1852}
1853
1854bool kvmppc_get_host_serial(char **value)
1855{
1856 return g_file_get_contents("/proc/device-tree/system-id", value, NULL,
1857 NULL);
1858}
1859
1860bool kvmppc_get_host_model(char **value)
1861{
1862 return g_file_get_contents("/proc/device-tree/model", value, NULL, NULL);
1863}
1864
1865
1866static int kvmppc_find_cpu_dt(char *buf, int buf_len)
1867{
1868 struct dirent *dirp;
1869 DIR *dp;
1870
1871 dp = opendir(PROC_DEVTREE_CPU);
1872 if (!dp) {
1873 printf("Can't open directory " PROC_DEVTREE_CPU "\n");
1874 return -1;
1875 }
1876
1877 buf[0] = '\0';
1878 while ((dirp = readdir(dp)) != NULL) {
1879 FILE *f;
1880
1881
1882 if (strcmp(dirp->d_name, ".") == 0 || strcmp(dirp->d_name, "..") == 0) {
1883 continue;
1884 }
1885
1886 snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU,
1887 dirp->d_name);
1888 f = fopen(buf, "r");
1889 if (f) {
1890 snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name);
1891 fclose(f);
1892 break;
1893 }
1894 buf[0] = '\0';
1895 }
1896 closedir(dp);
1897 if (buf[0] == '\0') {
1898 printf("Unknown host!\n");
1899 return -1;
1900 }
1901
1902 return 0;
1903}
1904
1905static uint64_t kvmppc_read_int_dt(const char *filename)
1906{
1907 union {
1908 uint32_t v32;
1909 uint64_t v64;
1910 } u;
1911 FILE *f;
1912 int len;
1913
1914 f = fopen(filename, "rb");
1915 if (!f) {
1916 return -1;
1917 }
1918
1919 len = fread(&u, 1, sizeof(u), f);
1920 fclose(f);
1921 switch (len) {
1922 case 4:
1923
1924 return be32_to_cpu(u.v32);
1925 case 8:
1926 return be64_to_cpu(u.v64);
1927 }
1928
1929 return 0;
1930}
1931
1932
1933
1934
1935
1936
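/*
 * Read a single (32- or 64-bit, big-endian) integer property from the host
 * CPU node under /proc/device-tree/cpus/; returns -1 if the node cannot be
 * located or the property cannot be read.
 */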
1937static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
1938{
1939 char buf[PATH_MAX], *tmp;
1940 uint64_t val;
1941
1942 if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
1943 return -1;
1944 }
1945
1946 tmp = g_strdup_printf("%s/%s", buf, propname);
1947 val = kvmppc_read_int_dt(tmp);
1948 g_free(tmp);
1949
1950 return val;
1951}
1952
1953uint64_t kvmppc_get_clockfreq(void)
1954{
1955 return kvmppc_read_int_cpu_dt("clock-frequency");
1956}
1957
1958static int kvmppc_get_dec_bits(void)
1959{
1960 int nr_bits = kvmppc_read_int_cpu_dt("ibm,dec-bits");
1961
1962 if (nr_bits > 0) {
1963 return nr_bits;
1964 }
1965 return 0;
1966}
1967
1968static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
1969{
1970 CPUState *cs = env_cpu(env);
1971
1972 if (kvm_vm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
1973 !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
1974 return 0;
1975 }
1976
1977 return 1;
1978}
1979
1980int kvmppc_get_hasidle(CPUPPCState *env)
1981{
1982 struct kvm_ppc_pvinfo pvinfo;
1983
1984 if (!kvmppc_get_pvinfo(env, &pvinfo) &&
1985 (pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE)) {
1986 return 1;
1987 }
1988
1989 return 0;
1990}
1991
1992int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
1993{
1994 uint32_t *hc = (uint32_t *)buf;
1995 struct kvm_ppc_pvinfo pvinfo;
1996
1997 if (!kvmppc_get_pvinfo(env, &pvinfo)) {
1998 memcpy(buf, pvinfo.hcall, buf_len);
1999 return 0;
2000 }
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
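    /*
     * No PV info from the kernel, so fall back to a stub that makes every
     * hypercall fail with -1 regardless of guest endianness:
     *   0x08000048  tdi 0,r0,72  (reads as "b .+8" when byte-swapped)
     *   0x3860ffff  li r3,-1
     *   0x48000008  b .+8        (reads as a harmless tdi when byte-swapped)
     *   byte-swapped li r3,-1
     */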
2011 hc[0] = cpu_to_be32(0x08000048);
2012 hc[1] = cpu_to_be32(0x3860ffff);
2013 hc[2] = cpu_to_be32(0x48000008);
2014 hc[3] = cpu_to_be32(bswap32(0x3860ffff));
2015
2016 return 1;
2017}
2018
2019static inline int kvmppc_enable_hcall(KVMState *s, target_ulong hcall)
2020{
2021 return kvm_vm_enable_cap(s, KVM_CAP_PPC_ENABLE_HCALL, 0, hcall, 1);
2022}
2023
2024void kvmppc_enable_logical_ci_hcalls(void)
2025{
2026
2027
2028
2029
2030
2031
2032 kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_LOAD);
2033 kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_STORE);
2034}
2035
2036void kvmppc_enable_set_mode_hcall(void)
2037{
2038 kvmppc_enable_hcall(kvm_state, H_SET_MODE);
2039}
2040
2041void kvmppc_enable_clear_ref_mod_hcalls(void)
2042{
2043 kvmppc_enable_hcall(kvm_state, H_CLEAR_REF);
2044 kvmppc_enable_hcall(kvm_state, H_CLEAR_MOD);
2045}
2046
2047void kvmppc_enable_h_page_init(void)
2048{
2049 kvmppc_enable_hcall(kvm_state, H_PAGE_INIT);
2050}
2051
2052void kvmppc_enable_h_rpt_invalidate(void)
2053{
2054 kvmppc_enable_hcall(kvm_state, H_RPT_INVALIDATE);
2055}
2056
2057void kvmppc_set_papr(PowerPCCPU *cpu)
2058{
2059 CPUState *cs = CPU(cpu);
2060 int ret;
2061
2062 if (!kvm_enabled()) {
2063 return;
2064 }
2065
2066 ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_PAPR, 0);
2067 if (ret) {
2068 error_report("This vCPU type or KVM version does not support PAPR");
2069 exit(1);
2070 }
2071
2072
2073
2074
2075
2076 cap_papr = 1;
2077}
2078
2079int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr)
2080{
2081 return kvm_set_one_reg(CPU(cpu), KVM_REG_PPC_ARCH_COMPAT, &compat_pvr);
2082}
2083
2084void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
2085{
2086 CPUState *cs = CPU(cpu);
2087 int ret;
2088
2089 ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_EPR, 0, mpic_proxy);
2090 if (ret && mpic_proxy) {
2091 error_report("This KVM version does not support EPR");
2092 exit(1);
2093 }
2094}
2095
2096bool kvmppc_get_fwnmi(void)
2097{
2098 return cap_fwnmi;
2099}
2100
2101int kvmppc_set_fwnmi(PowerPCCPU *cpu)
2102{
2103 CPUState *cs = CPU(cpu);
2104
2105 return kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_FWNMI, 0);
2106}
2107
2108int kvmppc_smt_threads(void)
2109{
2110 return cap_ppc_smt ? cap_ppc_smt : 1;
2111}
2112
2113int kvmppc_set_smt_threads(int smt)
2114{
2115 int ret;
2116
2117 ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_SMT, 0, smt, 0);
2118 if (!ret) {
2119 cap_ppc_smt = smt;
2120 }
2121 return ret;
2122}
2123
2124void kvmppc_error_append_smt_possible_hint(Error *const *errp)
2125{
2126 int i;
2127 GString *g;
2128 char *s;
2129
2130 assert(kvm_enabled());
2131 if (cap_ppc_smt_possible) {
2132 g = g_string_new("Available VSMT modes:");
2133 for (i = 63; i >= 0; i--) {
2134 if ((1UL << i) & cap_ppc_smt_possible) {
2135 g_string_append_printf(g, " %lu", (1UL << i));
2136 }
2137 }
2138 s = g_string_free(g, false);
2139 error_append_hint(errp, "%s.\n", s);
2140 g_free(s);
2141 } else {
2142 error_append_hint(errp,
2143 "This KVM seems to be too old to support VSMT.\n");
2144 }
2145}
2146
2147
2148#ifdef TARGET_PPC64
2149uint64_t kvmppc_vrma_limit(unsigned int hash_shift)
2150{
2151 struct kvm_ppc_smmu_info info;
2152 long rampagesize, best_page_shift;
2153 int i;
2154
2155
2156
2157
2158
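    /*
     * Pick the largest page size the host MMU supports that is no bigger
     * than the real page size backing guest RAM, and derive the VRMA limit
     * from it and the hash table size.
     */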
2159 kvm_get_smmu_info(&info, &error_fatal);
2160 rampagesize = qemu_minrampagesize();
2161 best_page_shift = 0;
2162
2163 for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
2164 struct kvm_ppc_one_seg_page_size *sps = &info.sps[i];
2165
2166 if (!sps->page_shift) {
2167 continue;
2168 }
2169
2170 if ((sps->page_shift > best_page_shift)
2171 && ((1UL << sps->page_shift) <= rampagesize)) {
2172 best_page_shift = sps->page_shift;
2173 }
2174 }
2175
2176 return 1ULL << (best_page_shift + hash_shift - 7);
2177}
2178#endif
2179
2180bool kvmppc_spapr_use_multitce(void)
2181{
2182 return cap_spapr_multitce;
2183}
2184
2185int kvmppc_spapr_enable_inkernel_multitce(void)
2186{
2187 int ret;
2188
2189 ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
2190 H_PUT_TCE_INDIRECT, 1);
2191 if (!ret) {
2192 ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
2193 H_STUFF_TCE, 1);
2194 }
2195
2196 return ret;
2197}
2198
2199void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
2200 uint64_t bus_offset, uint32_t nb_table,
2201 int *pfd, bool need_vfio)
2202{
2203 long len;
2204 int fd;
2205 void *table;
2206
2207
2208
2209
2210
2211 *pfd = -1;
2212 if (!cap_spapr_tce || (need_vfio && !cap_spapr_vfio)) {
2213 return NULL;
2214 }
2215
2216 if (cap_spapr_tce_64) {
2217 struct kvm_create_spapr_tce_64 args = {
2218 .liobn = liobn,
2219 .page_shift = page_shift,
2220 .offset = bus_offset >> page_shift,
2221 .size = nb_table,
2222 .flags = 0
2223 };
2224 fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE_64, &args);
2225 if (fd < 0) {
2226 fprintf(stderr,
2227 "KVM: Failed to create TCE64 table for liobn 0x%x\n",
2228 liobn);
2229 return NULL;
2230 }
2231 } else if (cap_spapr_tce) {
2232 uint64_t window_size = (uint64_t) nb_table << page_shift;
2233 struct kvm_create_spapr_tce args = {
2234 .liobn = liobn,
2235 .window_size = window_size,
2236 };
2237 if ((window_size != args.window_size) || bus_offset) {
2238 return NULL;
2239 }
2240 fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args);
2241 if (fd < 0) {
2242 fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n",
2243 liobn);
2244 return NULL;
2245 }
2246 } else {
2247 return NULL;
2248 }
2249
2250 len = nb_table * sizeof(uint64_t);
2251
2252
2253 table = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
2254 if (table == MAP_FAILED) {
2255 fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
2256 liobn);
2257 close(fd);
2258 return NULL;
2259 }
2260
2261 *pfd = fd;
2262 return table;
2263}
2264
2265int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t nb_table)
2266{
2267 long len;
2268
2269 if (fd < 0) {
2270 return -1;
2271 }
2272
2273 len = nb_table * sizeof(uint64_t);
2274 if ((munmap(table, len) < 0) ||
2275 (close(fd) < 0)) {
2276 fprintf(stderr, "KVM: Unexpected error removing TCE table: %s\n",
2277 strerror(errno));
2278
2279 }
2280
2281 return 0;
2282}
2283
2284int kvmppc_reset_htab(int shift_hint)
2285{
2286 uint32_t shift = shift_hint;
2287
2288 if (!kvm_enabled()) {
2289
2290 return 0;
2291 }
2292 if (kvm_vm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) {
2293 int ret;
2294 ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
2295 if (ret == -ENOTTY) {
2296
2297
2298
2299
2300
2301
2302 return 0;
2303 } else if (ret < 0) {
2304 return ret;
2305 }
2306 return shift;
2307 }
2308
2309
2310
2311
2312
2313
2314
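    /*
     * The kernel predates the HTAB allocation ioctl. With KVM-PR, QEMU must
     * allocate the hash table itself (return 0); with an HV kernel of that
     * era, assume the historical fixed 16 MiB hash table has already been
     * allocated and report a shift of 24.
     */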
2315 if (kvmppc_is_pr(kvm_state)) {
2316
2317 return 0;
2318 } else {
2319
2320 return 24;
2321 }
2322}
2323
2324static inline uint32_t mfpvr(void)
2325{
2326 uint32_t pvr;
2327
2328 asm ("mfpvr %0"
2329 : "=r"(pvr));
2330 return pvr;
2331}
2332
2333static void alter_insns(uint64_t *word, uint64_t flags, bool on)
2334{
2335 if (on) {
2336 *word |= flags;
2337 } else {
2338 *word &= ~flags;
2339 }
2340}
2341
2342static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
2343{
2344 PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
2345 uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size");
2346 uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size");
2347
    /* Fill in the class with what we can query from the host */
2349 pcc->pvr = mfpvr();
2350
2351 alter_insns(&pcc->insns_flags, PPC_ALTIVEC,
2352 qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_ALTIVEC);
2353 alter_insns(&pcc->insns_flags2, PPC2_VSX,
2354 qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_VSX);
2355 alter_insns(&pcc->insns_flags2, PPC2_DFP,
2356 qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_DFP);
2357
2358 if (dcache_size != -1) {
2359 pcc->l1_dcache_size = dcache_size;
2360 }
2361
2362 if (icache_size != -1) {
2363 pcc->l1_icache_size = icache_size;
2364 }
2365
2366#if defined(TARGET_PPC64)
2367 pcc->radix_page_info = kvm_get_radix_page_info();
2368
2369 if ((pcc->pvr & 0xffffff00) == CPU_POWERPC_POWER9_DD1) {
        /*
         * POWER9 DD1 is not fully ISA v3.00 compliant, so don't offer
         * any of the architected compat modes on it.
         */
2376 pcc->pcr_supported &= ~(PCR_COMPAT_3_00 | PCR_COMPAT_2_07
2377 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05);
2378 }
2379#endif
2380}
2381
2382bool kvmppc_has_cap_epr(void)
2383{
2384 return cap_epr;
2385}
2386
2387bool kvmppc_has_cap_fixup_hcalls(void)
2388{
2389 return cap_fixup_hcalls;
2390}
2391
2392bool kvmppc_has_cap_htm(void)
2393{
2394 return cap_htm;
2395}
2396
2397bool kvmppc_has_cap_mmu_radix(void)
2398{
2399 return cap_mmu_radix;
2400}
2401
2402bool kvmppc_has_cap_mmu_hash_v3(void)
2403{
2404 return cap_mmu_hash_v3;
2405}
2406
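/* Return true when the host is a POWER8 variant (POWER8, POWER8E or POWER8NVL) */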
2407static bool kvmppc_power8_host(void)
2408{
2409 bool ret = false;
2410#ifdef TARGET_PPC64
2411 {
2412 uint32_t base_pvr = CPU_POWERPC_POWER_SERVER_MASK & mfpvr();
2413 ret = (base_pvr == CPU_POWERPC_POWER8E_BASE) ||
2414 (base_pvr == CPU_POWERPC_POWER8NVL_BASE) ||
2415 (base_pvr == CPU_POWERPC_POWER8_BASE);
2416 }
2417#endif
2418 return ret;
2419}
2420
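/*
 * The parse_cap_ppc_safe_*() helpers below translate the bits reported
 * by KVM_PPC_GET_CPU_CHAR into the values used by the corresponding
 * spapr-caps settings.
 */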
2421static int parse_cap_ppc_safe_cache(struct kvm_ppc_cpu_char c)
2422{
2423 bool l1d_thread_priv_req = !kvmppc_power8_host();
2424
2425 if (~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_L1D_FLUSH_PR) {
2426 return 2;
2427 } else if ((!l1d_thread_priv_req ||
2428 c.character & c.character_mask & H_CPU_CHAR_L1D_THREAD_PRIV) &&
2429 (c.character & c.character_mask
2430 & (H_CPU_CHAR_L1D_FLUSH_ORI30 | H_CPU_CHAR_L1D_FLUSH_TRIG2))) {
2431 return 1;
2432 }
2433
2434 return 0;
2435}
2436
2437static int parse_cap_ppc_safe_bounds_check(struct kvm_ppc_cpu_char c)
2438{
2439 if (~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR) {
2440 return 2;
2441 } else if (c.character & c.character_mask & H_CPU_CHAR_SPEC_BAR_ORI31) {
2442 return 1;
2443 }
2444
2445 return 0;
2446}
2447
2448static int parse_cap_ppc_safe_indirect_branch(struct kvm_ppc_cpu_char c)
2449{
2450 if ((~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_FLUSH_COUNT_CACHE) &&
2451 (~c.character & c.character_mask & H_CPU_CHAR_CACHE_COUNT_DIS) &&
2452 (~c.character & c.character_mask & H_CPU_CHAR_BCCTRL_SERIALISED)) {
2453 return SPAPR_CAP_FIXED_NA;
2454 } else if (c.behaviour & c.behaviour_mask & H_CPU_BEHAV_FLUSH_COUNT_CACHE) {
2455 return SPAPR_CAP_WORKAROUND;
2456 } else if (c.character & c.character_mask & H_CPU_CHAR_CACHE_COUNT_DIS) {
2457 return SPAPR_CAP_FIXED_CCD;
2458 } else if (c.character & c.character_mask & H_CPU_CHAR_BCCTRL_SERIALISED) {
2459 return SPAPR_CAP_FIXED_IBS;
2460 }
2461
2462 return 0;
2463}
2464
2465static int parse_cap_ppc_count_cache_flush_assist(struct kvm_ppc_cpu_char c)
2466{
2467 if (c.character & c.character_mask & H_CPU_CHAR_BCCTR_FLUSH_ASSIST) {
2468 return 1;
2469 }
2470 return 0;
2471}
2472
2473bool kvmppc_has_cap_xive(void)
2474{
2475 return cap_xive;
2476}
2477
2478static void kvmppc_get_cpu_characteristics(KVMState *s)
2479{
2480 struct kvm_ppc_cpu_char c;
2481 int ret;
2482
    /* Assume everything is vulnerable until the kernel tells us otherwise */
2484 cap_ppc_safe_cache = 0;
2485 cap_ppc_safe_bounds_check = 0;
2486 cap_ppc_safe_indirect_branch = 0;
2487
2488 ret = kvm_vm_check_extension(s, KVM_CAP_PPC_GET_CPU_CHAR);
2489 if (!ret) {
2490 return;
2491 }
2492 ret = kvm_vm_ioctl(s, KVM_PPC_GET_CPU_CHAR, &c);
2493 if (ret < 0) {
2494 return;
2495 }
2496
2497 cap_ppc_safe_cache = parse_cap_ppc_safe_cache(c);
2498 cap_ppc_safe_bounds_check = parse_cap_ppc_safe_bounds_check(c);
2499 cap_ppc_safe_indirect_branch = parse_cap_ppc_safe_indirect_branch(c);
2500 cap_ppc_count_cache_flush_assist =
2501 parse_cap_ppc_count_cache_flush_assist(c);
2502}
2503
2504int kvmppc_get_cap_safe_cache(void)
2505{
2506 return cap_ppc_safe_cache;
2507}
2508
2509int kvmppc_get_cap_safe_bounds_check(void)
2510{
2511 return cap_ppc_safe_bounds_check;
2512}
2513
2514int kvmppc_get_cap_safe_indirect_branch(void)
2515{
2516 return cap_ppc_safe_indirect_branch;
2517}
2518
2519int kvmppc_get_cap_count_cache_flush_assist(void)
2520{
2521 return cap_ppc_count_cache_flush_assist;
2522}
2523
2524bool kvmppc_has_cap_nested_kvm_hv(void)
2525{
2526 return !!cap_ppc_nested_kvm_hv;
2527}
2528
2529int kvmppc_set_cap_nested_kvm_hv(int enable)
2530{
2531 return kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_NESTED_HV, 0, enable);
2532}
2533
2534bool kvmppc_has_cap_spapr_vfio(void)
2535{
2536 return cap_spapr_vfio;
2537}
2538
2539int kvmppc_get_cap_large_decr(void)
2540{
2541 return cap_large_decr;
2542}
2543
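/*
 * Toggle the large decrementer by flipping LPCR_LD in the vCPU's LPCR,
 * then read the register back to confirm the kernel accepted the change.
 */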
2544int kvmppc_enable_cap_large_decr(PowerPCCPU *cpu, int enable)
2545{
2546 CPUState *cs = CPU(cpu);
2547 uint64_t lpcr = 0;
2548
2549 kvm_get_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);
2550
2551 if (!!(lpcr & LPCR_LD) != !!enable) {
2552 if (enable) {
2553 lpcr |= LPCR_LD;
2554 } else {
2555 lpcr &= ~LPCR_LD;
2556 }
2557 kvm_set_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);
2558 kvm_get_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);
2559
2560 if (!!(lpcr & LPCR_LD) != !!enable) {
2561 return -1;
2562 }
2563 }
2564
2565 return 0;
2566}
2567
2568int kvmppc_has_cap_rpt_invalidate(void)
2569{
2570 return cap_rpt_invalidate;
2571}
2572
2573PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
2574{
2575 uint32_t host_pvr = mfpvr();
2576 PowerPCCPUClass *pvr_pcc;
2577
2578 pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
2579 if (pvr_pcc == NULL) {
2580 pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr);
2581 }
2582
2583 return pvr_pcc;
2584}
2585
2586static void pseries_machine_class_fixup(ObjectClass *oc, void *opaque)
2587{
2588 MachineClass *mc = MACHINE_CLASS(oc);
2589
2590 mc->default_cpu_type = TYPE_HOST_POWERPC_CPU;
2591}
2592
2593static int kvm_ppc_register_host_cpu_type(void)
2594{
2595 TypeInfo type_info = {
2596 .name = TYPE_HOST_POWERPC_CPU,
2597 .class_init = kvmppc_host_cpu_class_init,
2598 };
2599 PowerPCCPUClass *pvr_pcc;
2600 ObjectClass *oc;
2601 DeviceClass *dc;
2602 int i;
2603
2604 pvr_pcc = kvm_ppc_get_host_cpu_class();
2605 if (pvr_pcc == NULL) {
2606 return -1;
2607 }
2608 type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
2609 type_register(&type_info);
2610
2611 object_class_foreach(pseries_machine_class_fixup, TYPE_SPAPR_MACHINE,
2612 false, NULL);
2613
2614 oc = object_class_by_name(type_info.name);
2615 g_assert(oc);
2616
    /*
     * Point the generic family alias at the host CPU type as well,
     * e.g. so that "POWER8" resolves to the host class on a
     * POWER8NVL machine.
     */
2622 dc = DEVICE_CLASS(ppc_cpu_get_family_class(pvr_pcc));
2623 for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
2624 if (strcasecmp(ppc_cpu_aliases[i].alias, dc->desc) == 0) {
2625 char *suffix;
2626
2627 ppc_cpu_aliases[i].model = g_strdup(object_class_get_name(oc));
2628 suffix = strstr(ppc_cpu_aliases[i].model, POWERPC_CPU_TYPE_SUFFIX);
2629 if (suffix) {
2630 *suffix = 0;
2631 }
2632 break;
2633 }
2634 }
2635
2636 return 0;
2637}
2638
2639int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function)
2640{
2641 struct kvm_rtas_token_args args = {
2642 .token = token,
2643 };
2644
2645 if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_RTAS)) {
2646 return -ENOENT;
2647 }
2648
2649 strncpy(args.name, function, sizeof(args.name) - 1);
2650
2651 return kvm_vm_ioctl(kvm_state, KVM_PPC_RTAS_DEFINE_TOKEN, &args);
2652}
2653
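/*
 * Open a read or write file descriptor onto the guest's hash page
 * table, starting at HPTE @index.
 */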
2654int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp)
2655{
2656 struct kvm_get_htab_fd s = {
2657 .flags = write ? KVM_GET_HTAB_WRITE : 0,
2658 .start_index = index,
2659 };
2660 int ret;
2661
2662 if (!cap_htab_fd) {
2663 error_setg(errp, "KVM version doesn't support %s the HPT",
2664 write ? "writing" : "reading");
2665 return -ENOTSUP;
2666 }
2667
2668 ret = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &s);
2669 if (ret < 0) {
2670 error_setg(errp, "Unable to open fd for %s HPT %s KVM: %s",
2671 write ? "writing" : "reading", write ? "to" : "from",
2672 strerror(errno));
2673 return -errno;
2674 }
2675
2676 return ret;
2677}
2678
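/*
 * Pull HPT chunks from the kernel's HTAB fd and append them to the
 * migration stream, stopping after @max_ns nanoseconds unless it is
 * negative.  Returns 1 once the whole table has been read, 0 if more
 * remains.
 */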
2679int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
2680{
2681 int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2682 uint8_t buf[bufsize];
2683 ssize_t rc;
2684
2685 do {
2686 rc = read(fd, buf, bufsize);
2687 if (rc < 0) {
2688 fprintf(stderr, "Error reading data from KVM HTAB fd: %s\n",
2689 strerror(errno));
2690 return rc;
2691 } else if (rc) {
2692 uint8_t *buffer = buf;
2693 ssize_t n = rc;
2694 while (n) {
2695 struct kvm_get_htab_header *head =
2696 (struct kvm_get_htab_header *) buffer;
2697 size_t chunksize = sizeof(*head) +
2698 HASH_PTE_SIZE_64 * head->n_valid;
2699
2700 qemu_put_be32(f, head->index);
2701 qemu_put_be16(f, head->n_valid);
2702 qemu_put_be16(f, head->n_invalid);
2703 qemu_put_buffer(f, (void *)(head + 1),
2704 HASH_PTE_SIZE_64 * head->n_valid);
2705
2706 buffer += chunksize;
2707 n -= chunksize;
2708 }
2709 }
2710 } while ((rc != 0)
2711 && ((max_ns < 0) ||
2712 ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns)));
2713
2714 return (rc == 0) ? 1 : 0;
2715}
2716
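/*
 * Read one chunk of HPTEs from the migration stream and push it to the
 * kernel through the write-mode HTAB fd.
 */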
2717int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
2718 uint16_t n_valid, uint16_t n_invalid, Error **errp)
2719{
2720 struct kvm_get_htab_header *buf;
2721 size_t chunksize = sizeof(*buf) + n_valid * HASH_PTE_SIZE_64;
2722 ssize_t rc;
2723
2724 buf = alloca(chunksize);
2725 buf->index = index;
2726 buf->n_valid = n_valid;
2727 buf->n_invalid = n_invalid;
2728
2729 qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64 * n_valid);
2730
2731 rc = write(fd, buf, chunksize);
2732 if (rc < 0) {
2733 error_setg_errno(errp, errno, "Error writing the KVM hash table");
2734 return -errno;
2735 }
2736 if (rc != chunksize) {
        /* A short write should never happen for a single chunk */
2738 error_setg(errp, "Short write while restoring the KVM hash table");
2739 return -ENOSPC;
2740 }
2741 return 0;
2742}
2743
2744bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
2745{
2746 return true;
2747}
2748
2749void kvm_arch_init_irq_routing(KVMState *s)
2750{
2751}
2752
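/*
 * Read @n HPTEs starting at @ptex into @hptes; ranges the kernel
 * reports as invalid are zero-filled.
 */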
2753void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n)
2754{
2755 int fd, rc;
2756 int i;
2757
2758 fd = kvmppc_get_htab_fd(false, ptex, &error_abort);
2759
2760 i = 0;
2761 while (i < n) {
2762 struct kvm_get_htab_header *hdr;
2763 int m = n < HPTES_PER_GROUP ? n : HPTES_PER_GROUP;
2764 char buf[sizeof(*hdr) + m * HASH_PTE_SIZE_64];
2765
2766 rc = read(fd, buf, sizeof(buf));
2767 if (rc < 0) {
2768 hw_error("kvmppc_read_hptes: Unable to read HPTEs");
2769 }
2770
2771 hdr = (struct kvm_get_htab_header *)buf;
2772 while ((i < n) && ((char *)hdr < (buf + rc))) {
2773 int invalid = hdr->n_invalid, valid = hdr->n_valid;
2774
2775 if (hdr->index != (ptex + i)) {
            hw_error("kvmppc_read_hptes: Unexpected HPTE index %"PRIu32
                     " != (%"HWADDR_PRIu" + %d)", hdr->index, ptex, i);
2778 }
2779
2780 if (n - i < valid) {
2781 valid = n - i;
2782 }
2783 memcpy(hptes + i, hdr + 1, HASH_PTE_SIZE_64 * valid);
2784 i += valid;
2785
2786 if ((n - i) < invalid) {
2787 invalid = n - i;
2788 }
2789 memset(hptes + i, 0, invalid * HASH_PTE_SIZE_64);
2790 i += invalid;
2791
2792 hdr = (struct kvm_get_htab_header *)
2793 ((char *)(hdr + 1) + HASH_PTE_SIZE_64 * hdr->n_valid);
2794 }
2795 }
2796
2797 close(fd);
2798}
2799
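/* Update a single HPTE at @ptex through the kernel's HTAB fd */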
2800void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1)
2801{
2802 int fd, rc;
2803 struct {
2804 struct kvm_get_htab_header hdr;
2805 uint64_t pte0;
2806 uint64_t pte1;
2807 } buf;
2808
    fd = kvmppc_get_htab_fd(true, 0, &error_abort);
2810
2811 buf.hdr.n_valid = 1;
2812 buf.hdr.n_invalid = 0;
2813 buf.hdr.index = ptex;
2814 buf.pte0 = cpu_to_be64(pte0);
2815 buf.pte1 = cpu_to_be64(pte1);
2816
2817 rc = write(fd, &buf, sizeof(buf));
2818 if (rc != sizeof(buf)) {
2819 hw_error("kvmppc_write_hpte: Unable to update KVM HPT");
2820 }
2821 close(fd);
2822}
2823
2824int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
2825 uint64_t address, uint32_t data, PCIDevice *dev)
2826{
2827 return 0;
2828}
2829
2830int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
2831 int vector, PCIDevice *dev)
2832{
2833 return 0;
2834}
2835
2836int kvm_arch_release_virq_post(int virq)
2837{
2838 return 0;
2839}
2840
2841int kvm_arch_msi_data_to_gsi(uint32_t data)
2842{
2843 return data & 0xffff;
2844}
2845
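/* Pass a machine check reported by KVM on to the sPAPR machine-check handling code */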
2846#if defined(TARGET_PPC64)
2847int kvm_handle_nmi(PowerPCCPU *cpu, struct kvm_run *run)
2848{
2849 uint16_t flags = run->flags & KVM_RUN_PPC_NMI_DISP_MASK;
2850
2851 cpu_synchronize_state(CPU(cpu));
2852
2853 spapr_mce_req_event(cpu, flags == KVM_RUN_PPC_NMI_DISP_FULLY_RECOV);
2854
2855 return 0;
2856}
2857#endif
2858
2859int kvmppc_enable_hwrng(void)
2860{
2861 if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_PPC_HWRNG)) {
2862 return -1;
2863 }
2864
2865 return kvmppc_enable_hcall(kvm_state, H_RANDOM);
2866}
2867
2868void kvmppc_check_papr_resize_hpt(Error **errp)
2869{
2870 if (!kvm_enabled()) {
2871 return;
2872 }
2873
2874 if (cap_resize_hpt) {
2875 return;
2876 }
2877
    /* PR KVM keeps the HPT under QEMU's control, so resizing works there too */
2879 if (kvmppc_is_pr(kvm_state)) {
2880 return;
2881 }
2882
2883 error_setg(errp,
2884 "Hash page table resizing not available with this KVM version");
2885}
2886
2887int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu, target_ulong flags, int shift)
2888{
2889 CPUState *cs = CPU(cpu);
2890 struct kvm_ppc_resize_hpt rhpt = {
2891 .flags = flags,
2892 .shift = shift,
2893 };
2894
2895 if (!cap_resize_hpt) {
2896 return -ENOSYS;
2897 }
2898
2899 return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_PREPARE, &rhpt);
2900}
2901
2902int kvmppc_resize_hpt_commit(PowerPCCPU *cpu, target_ulong flags, int shift)
2903{
2904 CPUState *cs = CPU(cpu);
2905 struct kvm_ppc_resize_hpt rhpt = {
2906 .flags = flags,
2907 .shift = shift,
2908 };
2909
2910 if (!cap_resize_hpt) {
2911 return -ENOSYS;
2912 }
2913
2914 return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_COMMIT, &rhpt);
2915}
2916
/*
 * Decide whether the guest needs the PVR compatibility workaround
 * before its PVR is pushed to KVM.  HV KVM can only expose the real
 * host PVR, so unless the kernel advertises PVR compat support we must
 * not hand it an arbitrary value; PR KVM emulates the PVR and doesn't
 * care.
 */
2932bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu)
2933{
2934 CPUState *cs = CPU(cpu);
2935
2936 if (!kvm_enabled()) {
2937 return false;
2938 }
2939
2940 if (cap_ppc_pvr_compat) {
2941 return false;
2942 }
2943
2944 return !kvmppc_is_pr(cs->kvm_state);
2945}
2946
2947void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu, unsigned int online)
2948{
2949 CPUState *cs = CPU(cpu);
2950
2951 if (kvm_enabled()) {
2952 kvm_set_one_reg(cs, KVM_REG_PPC_ONLINE, &online);
2953 }
2954}
2955
2956void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset)
2957{
2958 CPUState *cs = CPU(cpu);
2959
2960 if (kvm_enabled()) {
2961 kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &tb_offset);
2962 }
2963}
2964
2965bool kvm_arch_cpu_check_are_resettable(void)
2966{
2967 return true;
2968}
2969