/*
 * PowerPC implementation of KVM hooks
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <dirent.h>
#include <sys/ioctl.h>
#include <sys/vfs.h>

#include <linux/kvm.h>

#include "qapi/error.h"
#include "qemu/error-report.h"
#include "cpu.h"
#include "cpu-models.h"
#include "qemu/timer.h"
#include "sysemu/hw_accel.h"
#include "kvm_ppc.h"
#include "sysemu/cpus.h"
#include "sysemu/device_tree.h"
#include "mmu-hash64.h"

#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/hw.h"
#include "hw/ppc/ppc.h"
#include "migration/qemu-file-types.h"
#include "sysemu/watchdog.h"
#include "trace.h"
#include "exec/gdbstub.h"
#include "exec/memattrs.h"
#include "exec/ram_addr.h"
#include "sysemu/hostmem.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"
#include "qemu/mmap-alloc.h"
#include "elf.h"
#include "sysemu/kvm_int.h"

#define PROC_DEVTREE_CPU "/proc/device-tree/cpus/"

#define DEBUG_RETURN_GUEST 0
#define DEBUG_RETURN_GDB   1

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static int cap_interrupt_unset;
static int cap_segstate;
static int cap_booke_sregs;
static int cap_ppc_smt;
static int cap_ppc_smt_possible;
static int cap_spapr_tce;
static int cap_spapr_tce_64;
static int cap_spapr_multitce;
static int cap_spapr_vfio;
static int cap_hior;
static int cap_one_reg;
static int cap_epr;
static int cap_ppc_watchdog;
static int cap_papr;
static int cap_htab_fd;
static int cap_fixup_hcalls;
static int cap_htm;
static int cap_mmu_radix;
static int cap_mmu_hash_v3;
static int cap_xive;
static int cap_resize_hpt;
static int cap_ppc_pvr_compat;
static int cap_ppc_safe_cache;
static int cap_ppc_safe_bounds_check;
static int cap_ppc_safe_indirect_branch;
static int cap_ppc_count_cache_flush_assist;
static int cap_ppc_nested_kvm_hv;
static int cap_large_decr;
static int cap_fwnmi;
static int cap_rpt_invalidate;
static int cap_ail_mode_3;

static uint32_t debug_inst_opcode;

/*
 * Check whether we are running with KVM-PR (instead of KVM-HV).  This
 * should only be used for fallback tests - generally we should use
 * explicit capabilities for the features we want, rather than
 * assuming what is/isn't available depending on the KVM variant.
 */
static bool kvmppc_is_pr(KVMState *ks)
{
    /* Assume KVM-PR if the GET_PVINFO capability is available */
    return kvm_vm_check_extension(ks, KVM_CAP_PPC_GET_PVINFO) != 0;
}

static int kvm_ppc_register_host_cpu_type(void);
static void kvmppc_get_cpu_characteristics(KVMState *s);
static int kvmppc_get_dec_bits(void);

int kvm_arch_get_default_type(MachineState *ms)
{
    return 0;
}

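/*
 * Probe the host kernel's KVM capabilities once at accelerator init
 * and cache the results in the cap_* globals above for later queries.
 */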
int kvm_arch_init(MachineState *ms, KVMState *s)
{
    cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
    cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
    cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
    cap_ppc_smt_possible = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT_POSSIBLE);
    cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
    cap_spapr_tce_64 = kvm_check_extension(s, KVM_CAP_SPAPR_TCE_64);
    cap_spapr_multitce = kvm_check_extension(s, KVM_CAP_SPAPR_MULTITCE);
    cap_spapr_vfio = kvm_vm_check_extension(s, KVM_CAP_SPAPR_TCE_VFIO);
    cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
    cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
    cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
    cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG);
    /*
     * Note: we don't set cap_papr here, because this capability is
     * only activated later by kvmppc_set_papr()
     */
    cap_htab_fd = kvm_vm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
    cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);
    cap_ppc_smt = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT);
    cap_htm = kvm_vm_check_extension(s, KVM_CAP_PPC_HTM);
    cap_mmu_radix = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_RADIX);
    cap_mmu_hash_v3 = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_HASH_V3);
    cap_xive = kvm_vm_check_extension(s, KVM_CAP_PPC_IRQ_XIVE);
    cap_resize_hpt = kvm_vm_check_extension(s, KVM_CAP_SPAPR_RESIZE_HPT);
    kvmppc_get_cpu_characteristics(s);
    cap_ppc_nested_kvm_hv = kvm_vm_check_extension(s, KVM_CAP_PPC_NESTED_HV);
    cap_large_decr = kvmppc_get_dec_bits();
    cap_fwnmi = kvm_vm_check_extension(s, KVM_CAP_PPC_FWNMI);
    /*
     * Note: setting it to false because there is no such capability
     * in KVM at this moment.
     *
     * TODO: call kvm_vm_check_extension() with the right capability
     * once the kernel starts implementing it.
     */
    cap_ppc_pvr_compat = false;

    if (!kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL)) {
        error_report("KVM: Host kernel doesn't have level irq capability");
        exit(1);
    }

    cap_rpt_invalidate = kvm_vm_check_extension(s, KVM_CAP_PPC_RPT_INVALIDATE);
    cap_ail_mode_3 = kvm_vm_check_extension(s, KVM_CAP_PPC_AIL_MODE_3);
    kvm_ppc_register_host_cpu_type();

    return 0;
}

int kvm_arch_irqchip_create(KVMState *s)
{
    return 0;
}

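/*
 * Push the guest-visible PVR into KVM via KVM_SET_SREGS.  Book3S
 * kernels need the KVM_CAP_PPC_SEGSTATE capability for this; on BookE
 * the native PVR is used, so there is nothing to sync.
 */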
static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
{
    CPUPPCState *cenv = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_sregs sregs;
    int ret;

    if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
        /*
         * What we're really trying to say is "if we're on BookE, we
         * use the native PVR for now". This is the only sane way to
         * check it though, so we potentially confuse users into
         * thinking they can run BookE guests on BookS. Let's hope
         * nobody dares enough :)
         */
        return 0;
    } else {
        if (!cap_segstate) {
            fprintf(stderr, "kvm error: missing PVR setting capability\n");
            return -ENOSYS;
        }
    }

    ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
    if (ret) {
        return ret;
    }

    sregs.pvr = cenv->spr[SPR_PVR];
    return kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
}

/* Set up a shared TLB array with KVM */
static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_book3e_206_tlb_params params = {};
    struct kvm_config_tlb cfg = {};
    unsigned int entries = 0;
    int ret, i;

    if (!kvm_enabled() ||
        !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) {
        return 0;
    }

    assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        params.tlb_sizes[i] = booke206_tlb_size(env, i);
        params.tlb_ways[i] = booke206_tlb_ways(env, i);
        entries += params.tlb_sizes[i];
    }

    assert(entries == env->nb_tlb);
    assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));

    env->tlb_dirty = true;

    cfg.array = (uintptr_t)env->tlb.tlbm;
    cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
    cfg.params = (uintptr_t)&params;
    cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;

    ret = kvm_vcpu_enable_cap(cs, KVM_CAP_SW_TLB, 0, (uintptr_t)&cfg);
    if (ret < 0) {
        fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
                __func__, strerror(-ret));
        return ret;
    }

    env->kvm_sw_tlb = true;
    return 0;
}


#if defined(TARGET_PPC64)
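/*
 * Ask KVM for the host MMU geometry (1T segment support, SLB size and
 * the supported segment/page size encodings); kvm_check_mmu() below
 * uses this to validate the guest's requested page sizes.
 */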
static void kvm_get_smmu_info(struct kvm_ppc_smmu_info *info, Error **errp)
{
    int ret;

    assert(kvm_state != NULL);

    if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
        error_setg(errp, "KVM doesn't expose the MMU features it supports");
        error_append_hint(errp, "Consider switching to a newer KVM\n");
        return;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_SMMU_INFO, info);
    if (ret == 0) {
        return;
    }

    error_setg_errno(errp, -ret,
                     "KVM failed to provide the MMU features it supports");
}

struct ppc_radix_page_info *kvm_get_radix_page_info(void)
{
    KVMState *s = KVM_STATE(current_accel());
    struct ppc_radix_page_info *radix_page_info;
    struct kvm_ppc_rmmu_info rmmu_info = { };
    int i;

    if (!kvm_check_extension(s, KVM_CAP_PPC_MMU_RADIX)) {
        return NULL;
    }
    if (kvm_vm_ioctl(s, KVM_PPC_GET_RMMU_INFO, &rmmu_info)) {
        return NULL;
    }
    radix_page_info = g_malloc0(sizeof(*radix_page_info));
    radix_page_info->count = 0;
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        if (rmmu_info.ap_encodings[i]) {
            radix_page_info->entries[i] = rmmu_info.ap_encodings[i];
            radix_page_info->count++;
        }
    }
    return radix_page_info;
}

target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
                                     bool radix, bool gtse,
                                     uint64_t proc_tbl)
{
    CPUState *cs = CPU(cpu);
    int ret;
    uint64_t flags = 0;
    struct kvm_ppc_mmuv3_cfg cfg = {
        .process_table = proc_tbl,
    };

    if (radix) {
        flags |= KVM_PPC_MMUV3_RADIX;
    }
    if (gtse) {
        flags |= KVM_PPC_MMUV3_GTSE;
    }
    cfg.flags = flags;
    ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_CONFIGURE_V3_MMU, &cfg);
    switch (ret) {
    case 0:
        return H_SUCCESS;
    case -EINVAL:
        return H_PARAMETER;
    case -ENODEV:
        return H_NOT_AVAILABLE;
    default:
        return H_HARDWARE;
    }
}

bool kvmppc_hpt_needs_host_contiguous_pages(void)
{
    static struct kvm_ppc_smmu_info smmu_info;

    if (!kvm_enabled()) {
        return false;
    }

    kvm_get_smmu_info(&smmu_info, &error_fatal);
    return !!(smmu_info.flags & KVM_PPC_PAGE_SIZES_REAL);
}

void kvm_check_mmu(PowerPCCPU *cpu, Error **errp)
{
    struct kvm_ppc_smmu_info smmu_info;
    int iq, ik, jq, jk;
    Error *local_err = NULL;

    /* For now, we only have anything to check on hash64 MMUs */
    if (!cpu->hash64_opts || !kvm_enabled()) {
        return;
    }

    kvm_get_smmu_info(&smmu_info, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)
        && !(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
        error_setg(errp,
                   "KVM does not support 1TiB segments which guest expects");
        return;
    }

    if (smmu_info.slb_size < cpu->hash64_opts->slb_size) {
        error_setg(errp, "KVM only supports %u SLB entries, but guest needs %u",
                   smmu_info.slb_size, cpu->hash64_opts->slb_size);
        return;
    }

    /*
     * Verify that every pagesize supported by the cpu model is
     * supported by KVM with the same encodings
     */
    for (iq = 0; iq < ARRAY_SIZE(cpu->hash64_opts->sps); iq++) {
        PPCHash64SegmentPageSizes *qsps = &cpu->hash64_opts->sps[iq];
        struct kvm_ppc_one_seg_page_size *ksps;

        for (ik = 0; ik < ARRAY_SIZE(smmu_info.sps); ik++) {
            if (qsps->page_shift == smmu_info.sps[ik].page_shift) {
                break;
            }
        }
        if (ik >= ARRAY_SIZE(smmu_info.sps)) {
            error_setg(errp, "KVM doesn't support base page shift %u",
                       qsps->page_shift);
            return;
        }

        ksps = &smmu_info.sps[ik];
        if (ksps->slb_enc != qsps->slb_enc) {
            error_setg(errp,
"KVM uses SLB encoding 0x%x for page shift %u, but guest expects 0x%x",
                       ksps->slb_enc, ksps->page_shift, qsps->slb_enc);
            return;
        }

        for (jq = 0; jq < ARRAY_SIZE(qsps->enc); jq++) {
            for (jk = 0; jk < ARRAY_SIZE(ksps->enc); jk++) {
                if (qsps->enc[jq].page_shift == ksps->enc[jk].page_shift) {
                    break;
                }
            }

            if (jk >= ARRAY_SIZE(ksps->enc)) {
                error_setg(errp, "KVM doesn't support page shift %u/%u",
                           qsps->enc[jq].page_shift, qsps->page_shift);
                return;
            }
            if (qsps->enc[jq].pte_enc != ksps->enc[jk].pte_enc) {
                error_setg(errp,
"KVM uses PTE encoding 0x%x for page shift %u/%u, but guest expects 0x%x",
                           ksps->enc[jk].pte_enc, qsps->enc[jq].page_shift,
                           qsps->page_shift, qsps->enc[jq].pte_enc);
                return;
            }
        }
    }

    if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) {
        /*
         * Mostly what guest pagesizes we can use are related to the
         * host pages used to map guest RAM, which is handled in the
         * platform code. Cache-Inhibited largepages (64k) however are
         * used for I/O, so if they're mapped to the host at all it
         * will be a normal mapping, not a special hugepage one used
         * for RAM.
         */
        if (qemu_real_host_page_size() < 0x10000) {
            error_setg(errp,
                       "KVM can't supply 64kiB CI pages, which guest expects");
        }
    }
}
#endif

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return POWERPC_CPU(cpu)->vcpu_id;
}

/*
 * e500 supports 2 h/w breakpoints and 2 h/w watchpoints.
 * book3s supports only 1 watchpoint, so an array size
 * of 4 is sufficient for now.
 */
#define MAX_HW_BKPTS 4

static struct HWBreakpoint {
    target_ulong addr;
    int type;
} hw_debug_points[MAX_HW_BKPTS];

static CPUWatchpoint hw_watchpoint;

/* Default there is no breakpoint and watchpoint supported */
static int max_hw_breakpoint;
static int max_hw_watchpoint;
static int nb_hw_breakpoint;
static int nb_hw_watchpoint;

static void kvmppc_hw_debug_points_init(CPUPPCState *cenv)
{
    if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
        max_hw_breakpoint = 2;
        max_hw_watchpoint = 2;
    }

    if ((max_hw_breakpoint + max_hw_watchpoint) > MAX_HW_BKPTS) {
        fprintf(stderr, "Error initializing h/w breakpoints\n");
        return;
    }
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *cenv = &cpu->env;
    int ret;

    /* Synchronize sregs with kvm */
    ret = kvm_arch_sync_sregs(cpu);
    if (ret) {
        if (ret == -EINVAL) {
            error_report("Register sync failed... If you're using kvm-hv.ko,"
                         " only \"-cpu host\" is possible");
        }
        return ret;
    }

    switch (cenv->mmu_model) {
    case POWERPC_MMU_BOOKE206:
        /* This target supports access to KVM's guest TLB */
        ret = kvm_booke206_tlb_init(cpu);
        break;
    case POWERPC_MMU_2_07:
        if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) {
            /*
             * KVM-HV has transactional memory on POWER8 also without
             * the KVM_CAP_PPC_HTM extension, so enable it here
             * instead as long as it's available to userspace on the
             * host.
             */
            if (qemu_getauxval(AT_HWCAP2) & PPC_FEATURE2_HAS_HTM) {
                cap_htm = true;
            }
        }
        break;
    default:
        break;
    }

    kvm_get_one_reg(cs, KVM_REG_PPC_DEBUG_INST, &debug_inst_opcode);
    kvmppc_hw_debug_points_init(cenv);

    return ret;
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

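/*
 * Push QEMU's shadow copy of the guest TLB back into KVM by marking
 * every entry dirty; KVM then re-reads the array that was shared in
 * kvm_booke206_tlb_init().
 */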
static void kvm_sw_tlb_put(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_dirty_tlb dirty_tlb;
    unsigned char *bitmap;
    int ret;

    if (!env->kvm_sw_tlb) {
        return;
    }

    bitmap = g_malloc((env->nb_tlb + 7) / 8);
    memset(bitmap, 0xFF, (env->nb_tlb + 7) / 8);

    dirty_tlb.bitmap = (uintptr_t)bitmap;
    dirty_tlb.num_dirty = env->nb_tlb;

    ret = kvm_vcpu_ioctl(cs, KVM_DIRTY_TLB, &dirty_tlb);
    if (ret) {
        fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
                __func__, strerror(-ret));
    }

    g_free(bitmap);
}

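/*
 * Read one SPR from KVM through the ONE_REG interface into
 * env->spr[spr]; the register width is taken from the
 * KVM_REG_SIZE_MASK bits of the id, so the same helper serves
 * 32-bit and 64-bit SPRs.
 */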
static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;

    union {
        uint32_t u32;
        uint64_t u64;
    } val = { };
    struct kvm_one_reg reg = {
        .id = id,
        .addr = (uintptr_t) &val,
    };
    int ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret != 0) {
        trace_kvm_failed_spr_get(spr, strerror(errno));
    } else {
        switch (id & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
            env->spr[spr] = val.u32;
            break;

        case KVM_REG_SIZE_U64:
            env->spr[spr] = val.u64;
            break;

        default:
            /* Don't handle this size yet */
            abort();
        }
    }
}

static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    union {
        uint32_t u32;
        uint64_t u64;
    } val;
    struct kvm_one_reg reg = {
        .id = id,
        .addr = (uintptr_t) &val,
    };
    int ret;

    switch (id & KVM_REG_SIZE_MASK) {
    case KVM_REG_SIZE_U32:
        val.u32 = env->spr[spr];
        break;

    case KVM_REG_SIZE_U64:
        val.u64 = env->spr[spr];
        break;

    default:
        /* Don't handle this size yet */
        abort();
    }

    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret != 0) {
        trace_kvm_failed_spr_set(spr, strerror(errno));
    }
}

static int kvm_put_fp(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_one_reg reg;
    int i;
    int ret;

    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr = env->fpscr;
        bool vsx = !!(env->insns_flags2 & PPC2_VSX);

        reg.id = KVM_REG_PPC_FPSCR;
        reg.addr = (uintptr_t)&fpscr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            trace_kvm_failed_fpscr_set(strerror(errno));
            return ret;
        }

        for (i = 0; i < 32; i++) {
            uint64_t vsr[2];
            uint64_t *fpr = cpu_fpr_ptr(&cpu->env, i);
            uint64_t *vsrl = cpu_vsrl_ptr(&cpu->env, i);

#if HOST_BIG_ENDIAN
            vsr[0] = float64_val(*fpr);
            vsr[1] = *vsrl;
#else
            vsr[0] = *vsrl;
            vsr[1] = float64_val(*fpr);
#endif
            reg.addr = (uintptr_t) &vsr;
            reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);

            ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
            if (ret < 0) {
                trace_kvm_failed_fp_set(vsx ? "VSR" : "FPR", i,
                                        strerror(errno));
                return ret;
            }
        }
    }

    if (env->insns_flags & PPC_ALTIVEC) {
        reg.id = KVM_REG_PPC_VSCR;
        reg.addr = (uintptr_t)&env->vscr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            trace_kvm_failed_vscr_set(strerror(errno));
            return ret;
        }

        for (i = 0; i < 32; i++) {
            reg.id = KVM_REG_PPC_VR(i);
            reg.addr = (uintptr_t)cpu_avr_ptr(env, i);
            ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
            if (ret < 0) {
                trace_kvm_failed_vr_set(i, strerror(errno));
                return ret;
            }
        }
    }

    return 0;
}

static int kvm_get_fp(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_one_reg reg;
    int i;
    int ret;

    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr;
        bool vsx = !!(env->insns_flags2 & PPC2_VSX);

        reg.id = KVM_REG_PPC_FPSCR;
        reg.addr = (uintptr_t)&fpscr;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret < 0) {
            trace_kvm_failed_fpscr_get(strerror(errno));
            return ret;
        } else {
            env->fpscr = fpscr;
        }

        for (i = 0; i < 32; i++) {
            uint64_t vsr[2];
            uint64_t *fpr = cpu_fpr_ptr(&cpu->env, i);
            uint64_t *vsrl = cpu_vsrl_ptr(&cpu->env, i);

            reg.addr = (uintptr_t) &vsr;
            reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);

            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
            if (ret < 0) {
                trace_kvm_failed_fp_get(vsx ? "VSR" : "FPR", i,
                                        strerror(errno));
                return ret;
            } else {
#if HOST_BIG_ENDIAN
                *fpr = vsr[0];
                if (vsx) {
                    *vsrl = vsr[1];
                }
#else
                *fpr = vsr[1];
                if (vsx) {
                    *vsrl = vsr[0];
                }
#endif
            }
        }
    }

    if (env->insns_flags & PPC_ALTIVEC) {
        reg.id = KVM_REG_PPC_VSCR;
        reg.addr = (uintptr_t)&env->vscr;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
        if (ret < 0) {
            trace_kvm_failed_vscr_get(strerror(errno));
            return ret;
        }

        for (i = 0; i < 32; i++) {
            reg.id = KVM_REG_PPC_VR(i);
            reg.addr = (uintptr_t)cpu_avr_ptr(env, i);
            ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
            if (ret < 0) {
                trace_kvm_failed_vr_get(i, strerror(errno));
                return ret;
            }
        }
    }

    return 0;
}

#if defined(TARGET_PPC64)
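/*
 * The Virtual Processor Area, SLB shadow and dispatch trace log are
 * guest/hypervisor shared structures registered by the guest with the
 * H_REGISTER_VPA hcall; their addresses live in KVM and must be
 * saved/restored around migration.
 */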
static int kvm_get_vpa(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    struct kvm_one_reg reg;
    int ret;

    reg.id = KVM_REG_PPC_VPA_ADDR;
    reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret < 0) {
        trace_kvm_failed_vpa_addr_get(strerror(errno));
        return ret;
    }

    assert((uintptr_t)&spapr_cpu->slb_shadow_size
           == ((uintptr_t)&spapr_cpu->slb_shadow_addr + 8));
    reg.id = KVM_REG_PPC_VPA_SLB;
    reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret < 0) {
        trace_kvm_failed_slb_get(strerror(errno));
        return ret;
    }

    assert((uintptr_t)&spapr_cpu->dtl_size
           == ((uintptr_t)&spapr_cpu->dtl_addr + 8));
    reg.id = KVM_REG_PPC_VPA_DTL;
    reg.addr = (uintptr_t)&spapr_cpu->dtl_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (ret < 0) {
        trace_kvm_failed_dtl_get(strerror(errno));
        return ret;
    }

    return 0;
}

static int kvm_put_vpa(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    struct kvm_one_reg reg;
    int ret;

    /*
     * SLB shadow or DTL can't be registered unless a master VPA is
     * registered.  That means when restoring state, if a VPA *is*
     * registered, we need to set that up first.  If not, we need to
     * deregister the others before deregistering the master VPA.
     */
    assert(spapr_cpu->vpa_addr
           || !(spapr_cpu->slb_shadow_addr || spapr_cpu->dtl_addr));

    if (spapr_cpu->vpa_addr) {
        reg.id = KVM_REG_PPC_VPA_ADDR;
        reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            trace_kvm_failed_vpa_addr_set(strerror(errno));
            return ret;
        }
    }

    assert((uintptr_t)&spapr_cpu->slb_shadow_size
           == ((uintptr_t)&spapr_cpu->slb_shadow_addr + 8));
    reg.id = KVM_REG_PPC_VPA_SLB;
    reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret < 0) {
        trace_kvm_failed_slb_set(strerror(errno));
        return ret;
    }

    assert((uintptr_t)&spapr_cpu->dtl_size
           == ((uintptr_t)&spapr_cpu->dtl_addr + 8));
    reg.id = KVM_REG_PPC_VPA_DTL;
    reg.addr = (uintptr_t)&spapr_cpu->dtl_addr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (ret < 0) {
        trace_kvm_failed_dtl_set(strerror(errno));
        return ret;
    }

    if (!spapr_cpu->vpa_addr) {
        reg.id = KVM_REG_PPC_VPA_ADDR;
        reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
        if (ret < 0) {
            trace_kvm_failed_null_vpa_addr_set(strerror(errno));
            return ret;
        }
    }

    return 0;
}
#endif

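/*
 * Upload the Book3S segment state (PVR, SDR1, SLB, segment registers
 * and BATs) through KVM_SET_SREGS.  When a virtual hypervisor (sPAPR)
 * owns the hash table, an encoding of that table is passed in the
 * SDR1 slot instead of the raw SPR value.
 */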
int kvmppc_put_books_sregs(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    struct kvm_sregs sregs = { };
    int i;

    sregs.pvr = env->spr[SPR_PVR];

    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        sregs.u.s.sdr1 = vhc->encode_hpt_for_kvm_pr(cpu->vhyp);
    } else {
        sregs.u.s.sdr1 = env->spr[SPR_SDR1];
    }

    /* Sync SLB */
#ifdef TARGET_PPC64
    for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
        sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
        if (env->slb[i].esid & SLB_ESID_V) {
            sregs.u.s.ppc64.slb[i].slbe |= i;
        }
        sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
    }
#endif

    /* Sync SRs */
    for (i = 0; i < 16; i++) {
        sregs.u.s.ppc32.sr[i] = env->sr[i];
    }

    /* Sync BATs */
    for (i = 0; i < 8; i++) {
        /* Beware. We have to swap upper and lower bits here */
        sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
            | env->DBAT[1][i];
        sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
            | env->IBAT[1][i];
    }

    return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
}

int kvm_arch_put_registers(CPUState *cs, int level)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_regs regs;
    int ret;
    int i;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    regs.ctr = env->ctr;
    regs.lr = env->lr;
    regs.xer = cpu_read_xer(env);
    regs.msr = env->msr;
    regs.pc = env->nip;

    regs.srr0 = env->spr[SPR_SRR0];
    regs.srr1 = env->spr[SPR_SRR1];

    regs.sprg0 = env->spr[SPR_SPRG0];
    regs.sprg1 = env->spr[SPR_SPRG1];
    regs.sprg2 = env->spr[SPR_SPRG2];
    regs.sprg3 = env->spr[SPR_SPRG3];
    regs.sprg4 = env->spr[SPR_SPRG4];
    regs.sprg5 = env->spr[SPR_SPRG5];
    regs.sprg6 = env->spr[SPR_SPRG6];
    regs.sprg7 = env->spr[SPR_SPRG7];

    regs.pid = env->spr[SPR_BOOKE_PID];

    for (i = 0; i < 32; i++) {
        regs.gpr[i] = env->gpr[i];
    }

    regs.cr = ppc_get_cr(env);

    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    kvm_put_fp(cs);

    if (env->tlb_dirty) {
        kvm_sw_tlb_put(cpu);
        env->tlb_dirty = false;
    }

    if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
        ret = kvmppc_put_books_sregs(cpu);
        if (ret < 0) {
            return ret;
        }
    }

    if (cap_hior && (level >= KVM_PUT_RESET_STATE)) {
        kvm_put_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
    }

    if (cap_one_reg) {
        int i;

        /*
         * We deliberately ignore errors here, for kernels which have
         * the ONE_REG calls, but don't support the specific
         * registers, there's a reasonable chance things will still
         * work, at least until we try to migrate.
         */
        for (i = 0; i < 1024; i++) {
            uint64_t id = env->spr_cb[i].one_reg_id;

            if (id != 0) {
                kvm_put_one_spr(cs, id, i);
            }
        }

#ifdef TARGET_PPC64
        if (FIELD_EX64(env->msr, MSR, TS)) {
            for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
                kvm_set_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
            }
            for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
                kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
            }
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
            kvm_set_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
        }

        if (cap_papr) {
            if (kvm_put_vpa(cs) < 0) {
                trace_kvm_failed_put_vpa();
            }
        }

        kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);

        if (level > KVM_PUT_RUNTIME_STATE) {
            kvm_put_one_spr(cs, KVM_REG_PPC_DPDES, SPR_DPDES);
        }
#endif
    }

    return ret;
}

static void kvm_sync_excp(CPUPPCState *env, int vector, int ivor)
{
    env->excp_vectors[vector] = env->spr[ivor] + env->spr[SPR_BOOKE_IVPR];
}

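/*
 * Pull the BookE special registers out of KVM.  Each KVM_SREGS_E_*
 * feature bit tells us which fields of the shared kvm_sregs layout
 * the kernel actually filled in.
 */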
static int kvmppc_get_booke_sregs(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    struct kvm_sregs sregs;
    int ret;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    if (sregs.u.e.features & KVM_SREGS_E_BASE) {
        env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
        env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
        env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
        env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
        env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
        env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
        env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
        env->spr[SPR_DECR] = sregs.u.e.dec;
        env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
        env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
        env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
    }

    if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
        env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
        env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
        env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
        env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
        env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
    }

    if (sregs.u.e.features & KVM_SREGS_E_64) {
        env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
    }

    if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
        env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
    }

    if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
        env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
        kvm_sync_excp(env, POWERPC_EXCP_CRITICAL, SPR_BOOKE_IVOR0);
        env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
        kvm_sync_excp(env, POWERPC_EXCP_MCHECK, SPR_BOOKE_IVOR1);
        env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
        kvm_sync_excp(env, POWERPC_EXCP_DSI, SPR_BOOKE_IVOR2);
        env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
        kvm_sync_excp(env, POWERPC_EXCP_ISI, SPR_BOOKE_IVOR3);
        env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
        kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL, SPR_BOOKE_IVOR4);
        env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
        kvm_sync_excp(env, POWERPC_EXCP_ALIGN, SPR_BOOKE_IVOR5);
        env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
        kvm_sync_excp(env, POWERPC_EXCP_PROGRAM, SPR_BOOKE_IVOR6);
        env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
        kvm_sync_excp(env, POWERPC_EXCP_FPU, SPR_BOOKE_IVOR7);
        env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
        kvm_sync_excp(env, POWERPC_EXCP_SYSCALL, SPR_BOOKE_IVOR8);
        env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
        kvm_sync_excp(env, POWERPC_EXCP_APU, SPR_BOOKE_IVOR9);
        env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
        kvm_sync_excp(env, POWERPC_EXCP_DECR, SPR_BOOKE_IVOR10);
        env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
        kvm_sync_excp(env, POWERPC_EXCP_FIT, SPR_BOOKE_IVOR11);
        env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
        kvm_sync_excp(env, POWERPC_EXCP_WDT, SPR_BOOKE_IVOR12);
        env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
        kvm_sync_excp(env, POWERPC_EXCP_DTLB, SPR_BOOKE_IVOR13);
        env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
        kvm_sync_excp(env, POWERPC_EXCP_ITLB, SPR_BOOKE_IVOR14);
        env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];
        kvm_sync_excp(env, POWERPC_EXCP_DEBUG, SPR_BOOKE_IVOR15);

        if (sregs.u.e.features & KVM_SREGS_E_SPE) {
            env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
            kvm_sync_excp(env, POWERPC_EXCP_SPEU, SPR_BOOKE_IVOR32);
            env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
            kvm_sync_excp(env, POWERPC_EXCP_EFPDI, SPR_BOOKE_IVOR33);
            env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
            kvm_sync_excp(env, POWERPC_EXCP_EFPRI, SPR_BOOKE_IVOR34);
        }

        if (sregs.u.e.features & KVM_SREGS_E_PM) {
            env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
            kvm_sync_excp(env, POWERPC_EXCP_EPERFM, SPR_BOOKE_IVOR35);
        }

        if (sregs.u.e.features & KVM_SREGS_E_PC) {
            env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
            kvm_sync_excp(env, POWERPC_EXCP_DOORI, SPR_BOOKE_IVOR36);
            env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
            kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37);
        }
    }

    if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
        env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
        env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
        env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
        env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
        env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
        env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
        env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
        env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
        env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
        env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
    }

    if (sregs.u.e.features & KVM_SREGS_EXP) {
        env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
    }

    if (sregs.u.e.features & KVM_SREGS_E_PD) {
        env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
        env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
    }

    if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
        env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
        env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
        env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;

        if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
            env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
            env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
        }
    }

    return 0;
}

static int kvmppc_get_books_sregs(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    struct kvm_sregs sregs;
    int ret;
    int i;

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
    if (ret < 0) {
        return ret;
    }

    if (!cpu->vhyp) {
        ppc_store_sdr1(env, sregs.u.s.sdr1);
    }

    /* Sync SLB */
#ifdef TARGET_PPC64
    /*
     * The packed SLB array we get from KVM_GET_SREGS only contains
     * information about valid entries. So we flush our internal copy
     * to get rid of stale ones, then put all valid SLB entries back
     * in.
     */
    memset(env->slb, 0, sizeof(env->slb));
    for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
        target_ulong rb = sregs.u.s.ppc64.slb[i].slbe;
        target_ulong rs = sregs.u.s.ppc64.slb[i].slbv;

        /* Only restore valid entries */
        if (rb & SLB_ESID_V) {
            ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs);
        }
    }
#endif

    /* Sync SRs */
    for (i = 0; i < 16; i++) {
        env->sr[i] = sregs.u.s.ppc32.sr[i];
    }

    /* Sync BATs */
    for (i = 0; i < 8; i++) {
        env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
        env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
        env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
        env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
    }

    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    struct kvm_regs regs;
    int i, ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
    if (ret < 0) {
        return ret;
    }

    ppc_set_cr(env, regs.cr);
    env->ctr = regs.ctr;
    env->lr = regs.lr;
    cpu_write_xer(env, regs.xer);
    env->msr = regs.msr;
    env->nip = regs.pc;

    env->spr[SPR_SRR0] = regs.srr0;
    env->spr[SPR_SRR1] = regs.srr1;

    env->spr[SPR_SPRG0] = regs.sprg0;
    env->spr[SPR_SPRG1] = regs.sprg1;
    env->spr[SPR_SPRG2] = regs.sprg2;
    env->spr[SPR_SPRG3] = regs.sprg3;
    env->spr[SPR_SPRG4] = regs.sprg4;
    env->spr[SPR_SPRG5] = regs.sprg5;
    env->spr[SPR_SPRG6] = regs.sprg6;
    env->spr[SPR_SPRG7] = regs.sprg7;

    env->spr[SPR_BOOKE_PID] = regs.pid;

    for (i = 0; i < 32; i++) {
        env->gpr[i] = regs.gpr[i];
    }

    kvm_get_fp(cs);

    if (cap_booke_sregs) {
        ret = kvmppc_get_booke_sregs(cpu);
        if (ret < 0) {
            return ret;
        }
    }

    if (cap_segstate) {
        ret = kvmppc_get_books_sregs(cpu);
        if (ret < 0) {
            return ret;
        }
    }

    if (cap_hior) {
        kvm_get_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
    }

    if (cap_one_reg) {
        int i;

        /*
         * We deliberately ignore errors here, for kernels which have
         * the ONE_REG calls, but don't support the specific
         * registers, there's a reasonable chance things will still
         * work, at least until we try to migrate.
         */
        for (i = 0; i < 1024; i++) {
            uint64_t id = env->spr_cb[i].one_reg_id;

            if (id != 0) {
                kvm_get_one_spr(cs, id, i);
            }
        }

#ifdef TARGET_PPC64
        if (FIELD_EX64(env->msr, MSR, TS)) {
            for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
                kvm_get_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
            }
            for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
                kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
            }
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
            kvm_get_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
        }

        if (cap_papr) {
            if (kvm_get_vpa(cs) < 0) {
                trace_kvm_failed_get_vpa();
            }
        }

        kvm_get_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
        kvm_get_one_spr(cs, KVM_REG_PPC_DPDES, SPR_DPDES);
#endif
    }

    return 0;
}

int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
{
    unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;

    if (irq != PPC_INTERRUPT_EXT) {
        return 0;
    }

    if (!kvm_enabled() || !cap_interrupt_unset) {
        return 0;
    }

    kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);

    return 0;
}

void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
    return;
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}

static int kvmppc_handle_halt(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) &&
        FIELD_EX64(env->msr, MSR, EE)) {
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
    }

    return 0;
}

/* map dcr access to existing qemu dcr emulation */
static int kvmppc_handle_dcr_read(CPUPPCState *env,
                                  uint32_t dcrn, uint32_t *data)
{
    if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0) {
        fprintf(stderr, "Read from unhandled DCR (0x%x)\n", dcrn);
    }

    return 0;
}

static int kvmppc_handle_dcr_write(CPUPPCState *env,
                                   uint32_t dcrn, uint32_t data)
{
    if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0) {
        fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
    }

    return 0;
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    /* Mixed endian case is not handled */
    uint32_t sc = debug_inst_opcode;

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                            sizeof(sc), 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 1)) {
        return -EINVAL;
    }

    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint32_t sc;

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 0) ||
        sc != debug_inst_opcode ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                            sizeof(sc), 1)) {
        return -EINVAL;
    }

    return 0;
}

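/*
 * hw_debug_points[] holds hardware breakpoints and watchpoints in
 * insertion order; nb_hw_breakpoint + nb_hw_watchpoint is the number
 * of live entries, and lookups scan the whole array matching on
 * (addr, type).
 */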
static int find_hw_breakpoint(target_ulong addr, int type)
{
    int n;

    assert((nb_hw_breakpoint + nb_hw_watchpoint)
           <= ARRAY_SIZE(hw_debug_points));

    for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
        if (hw_debug_points[n].addr == addr &&
            hw_debug_points[n].type == type) {
            return n;
        }
    }

    return -1;
}

static int find_hw_watchpoint(target_ulong addr, int *flag)
{
    int n;

    n = find_hw_breakpoint(addr, GDB_WATCHPOINT_ACCESS);
    if (n >= 0) {
        *flag = BP_MEM_ACCESS;
        return n;
    }

    n = find_hw_breakpoint(addr, GDB_WATCHPOINT_WRITE);
    if (n >= 0) {
        *flag = BP_MEM_WRITE;
        return n;
    }

    n = find_hw_breakpoint(addr, GDB_WATCHPOINT_READ);
    if (n >= 0) {
        *flag = BP_MEM_READ;
        return n;
    }

    return -1;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    if ((nb_hw_breakpoint + nb_hw_watchpoint) >= ARRAY_SIZE(hw_debug_points)) {
        return -ENOBUFS;
    }

    hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].addr = addr;
    hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].type = type;

    switch (type) {
    case GDB_BREAKPOINT_HW:
        if (nb_hw_breakpoint >= max_hw_breakpoint) {
            return -ENOBUFS;
        }

        if (find_hw_breakpoint(addr, type) >= 0) {
            return -EEXIST;
        }

        nb_hw_breakpoint++;
        break;

    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_ACCESS:
        if (nb_hw_watchpoint >= max_hw_watchpoint) {
            return -ENOBUFS;
        }

        if (find_hw_breakpoint(addr, type) >= 0) {
            return -EEXIST;
        }

        nb_hw_watchpoint++;
        break;

    default:
        return -ENOSYS;
    }

    return 0;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int n;

    n = find_hw_breakpoint(addr, type);
    if (n < 0) {
        return -ENOENT;
    }

    switch (type) {
    case GDB_BREAKPOINT_HW:
        nb_hw_breakpoint--;
        break;

    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_ACCESS:
        nb_hw_watchpoint--;
        break;

    default:
        return -ENOSYS;
    }
    hw_debug_points[n] = hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint];

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoint = nb_hw_watchpoint = 0;
}

void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
{
    int n;

    /* Software Breakpoint updates */
    if (kvm_sw_breakpoints_active(cs)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }

    assert((nb_hw_breakpoint + nb_hw_watchpoint)
           <= ARRAY_SIZE(hw_debug_points));
    assert((nb_hw_breakpoint + nb_hw_watchpoint) <= ARRAY_SIZE(dbg->arch.bp));

    if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        memset(dbg->arch.bp, 0, sizeof(dbg->arch.bp));
        for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
            switch (hw_debug_points[n].type) {
            case GDB_BREAKPOINT_HW:
                dbg->arch.bp[n].type = KVMPPC_DEBUG_BREAKPOINT;
                break;
            case GDB_WATCHPOINT_WRITE:
                dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE;
                break;
            case GDB_WATCHPOINT_READ:
                dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_READ;
                break;
            case GDB_WATCHPOINT_ACCESS:
                dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE |
                                       KVMPPC_DEBUG_WATCH_READ;
                break;
            default:
                cpu_abort(cs, "Unsupported breakpoint type\n");
            }
            dbg->arch.bp[n].addr = hw_debug_points[n].addr;
        }
    }
}

static int kvm_handle_hw_breakpoint(CPUState *cs,
                                    struct kvm_debug_exit_arch *arch_info)
{
    int handle = DEBUG_RETURN_GUEST;
    int n;
    int flag = 0;

    if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
        if (arch_info->status & KVMPPC_DEBUG_BREAKPOINT) {
            n = find_hw_breakpoint(arch_info->address, GDB_BREAKPOINT_HW);
            if (n >= 0) {
                handle = DEBUG_RETURN_GDB;
            }
        } else if (arch_info->status & (KVMPPC_DEBUG_WATCH_READ |
                                        KVMPPC_DEBUG_WATCH_WRITE)) {
            n = find_hw_watchpoint(arch_info->address, &flag);
            if (n >= 0) {
                handle = DEBUG_RETURN_GDB;
                cs->watchpoint_hit = &hw_watchpoint;
                hw_watchpoint.vaddr = hw_debug_points[n].addr;
                hw_watchpoint.flags = flag;
            }
        }
    }
    return handle;
}

static int kvm_handle_singlestep(void)
{
    return DEBUG_RETURN_GDB;
}

static int kvm_handle_sw_breakpoint(void)
{
    return DEBUG_RETURN_GDB;
}

static int kvm_handle_debug(PowerPCCPU *cpu, struct kvm_run *run)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    struct kvm_debug_exit_arch *arch_info = &run->debug.arch;

    if (cs->singlestep_enabled) {
        return kvm_handle_singlestep();
    }

    if (arch_info->status) {
        return kvm_handle_hw_breakpoint(cs, arch_info);
    }

    if (kvm_find_sw_breakpoint(cs, arch_info->address)) {
        return kvm_handle_sw_breakpoint();
    }

    /*
     * QEMU is not able to handle the debug exception, so inject a
     * program exception into the guest. Yes, a program exception,
     * NOT a debug exception !!
     *
     * When QEMU uses the debug resources, the debug exception must
     * always be set: we set MSR_DE and also MSRP_DEP so the guest
     * cannot change MSR_DE. When we emulate the debug resources for
     * the guest instead, the guest must control MSR_DE (to enable or
     * disable the debug interrupt on demand). Supporting both
     * configurations at the same time is not possible, so debug
     * resources cannot be shared between QEMU and the guest on BookE.
     * In the current design QEMU gets priority over the guest: if
     * QEMU is using the debug resources then the guest cannot use
     * them.
     *
     * For software breakpoints QEMU uses a privileged instruction, so
     * there is no way we get here because the guest set up a debug
     * exception; the only possibility is that the guest executed a
     * privileged / illegal instruction, which is why we inject a
     * program interrupt.
     */
    cpu_synchronize_state(cs);

    /*
     * env->nip is PC, so increment this by 4 to use
     * ppc_cpu_do_interrupt(), which is CPU 0xC00 "program" exception.
     */
    env->nip += 4;
    cs->exception_index = POWERPC_EXCP_PROGRAM;
    env->error_code = POWERPC_EXCP_INVAL;
    ppc_cpu_do_interrupt(cs);

    return DEBUG_RETURN_GUEST;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    int ret;

    qemu_mutex_lock_iothread();

    switch (run->exit_reason) {
    case KVM_EXIT_DCR:
        if (run->dcr.is_write) {
            trace_kvm_handle_dcr_write();
            ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
        } else {
            trace_kvm_handle_dcr_read();
            ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
        }
        break;
    case KVM_EXIT_HLT:
        trace_kvm_handle_halt();
        ret = kvmppc_handle_halt(cpu);
        break;
#if defined(TARGET_PPC64)
    case KVM_EXIT_PAPR_HCALL:
        trace_kvm_handle_papr_hcall(run->papr_hcall.nr);
        run->papr_hcall.ret = spapr_hypercall(cpu,
                                              run->papr_hcall.nr,
                                              run->papr_hcall.args);
        ret = 0;
        break;
#endif
    case KVM_EXIT_EPR:
        trace_kvm_handle_epr();
        run->epr.epr = ldl_phys(cs->as, env->mpic_iack);
        ret = 0;
        break;
    case KVM_EXIT_WATCHDOG:
        trace_kvm_handle_watchdog_expiry();
        watchdog_perform_action();
        ret = 0;
        break;

    case KVM_EXIT_DEBUG:
        trace_kvm_handle_debug_exception();
        if (kvm_handle_debug(cpu, run)) {
            ret = EXCP_DEBUG;
            break;
        }
        /* re-enter, this exception was guest-internal */
        ret = 0;
        break;

#if defined(TARGET_PPC64)
    case KVM_EXIT_NMI:
        trace_kvm_handle_nmi_exception();
        ret = kvm_handle_nmi(cpu, run);
        break;
#endif

    default:
        fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
        ret = -1;
        break;
    }

    qemu_mutex_unlock_iothread();
    return ret;
}

int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
{
    CPUState *cs = CPU(cpu);
    uint32_t bits = tsr_bits;
    struct kvm_one_reg reg = {
        .id = KVM_REG_PPC_OR_TSR,
        .addr = (uintptr_t) &bits,
    };

    if (!kvm_enabled()) {
        return 0;
    }

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}

int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
{
    CPUState *cs = CPU(cpu);
    uint32_t bits = tsr_bits;
    struct kvm_one_reg reg = {
        .id = KVM_REG_PPC_CLEAR_TSR,
        .addr = (uintptr_t) &bits,
    };

    if (!kvm_enabled()) {
        return 0;
    }

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}

int kvmppc_set_tcr(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    uint32_t tcr = env->spr[SPR_BOOKE_TCR];

    struct kvm_one_reg reg = {
        .id = KVM_REG_PPC_TCR,
        .addr = (uintptr_t) &tcr,
    };

    if (!kvm_enabled()) {
        return 0;
    }

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
}

int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    int ret;

    if (!kvm_enabled()) {
        return -1;
    }

    if (!cap_ppc_watchdog) {
        printf("warning: KVM does not support watchdog\n");
        return -1;
    }

    ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_BOOKE_WATCHDOG, 0);
    if (ret < 0) {
        fprintf(stderr, "%s: couldn't enable KVM_CAP_PPC_BOOKE_WATCHDOG: %s\n",
                __func__, strerror(-ret));
        return ret;
    }

    return ret;
}

static int read_cpuinfo(const char *field, char *value, int len)
{
    FILE *f;
    int ret = -1;
    int field_len = strlen(field);
    char line[512];

    f = fopen("/proc/cpuinfo", "r");
    if (!f) {
        return -1;
    }

    do {
        if (!fgets(line, sizeof(line), f)) {
            break;
        }
        if (!strncmp(line, field, field_len)) {
            pstrcpy(value, len, line);
            ret = 0;
            break;
        }
    } while (*line);

    fclose(f);

    return ret;
}

static uint32_t kvmppc_get_tbfreq_procfs(void)
{
    char line[512];
    char *ns;
    uint32_t tbfreq_fallback = NANOSECONDS_PER_SECOND;
    uint32_t tbfreq_procfs;

    if (read_cpuinfo("timebase", line, sizeof(line))) {
        return tbfreq_fallback;
    }

    ns = strchr(line, ':');
    if (!ns) {
        return tbfreq_fallback;
    }

    tbfreq_procfs = atoi(++ns);

    /* 0 is an invalid sample, just return the fallback value */
    return tbfreq_procfs ? tbfreq_procfs : tbfreq_fallback;
}

uint32_t kvmppc_get_tbfreq(void)
{
    static uint32_t cached_tbfreq;

    if (!cached_tbfreq) {
        cached_tbfreq = kvmppc_get_tbfreq_procfs();
    }

    return cached_tbfreq;
}

bool kvmppc_get_host_serial(char **value)
{
    return g_file_get_contents("/proc/device-tree/system-id", value, NULL,
                               NULL);
}

bool kvmppc_get_host_model(char **value)
{
    return g_file_get_contents("/proc/device-tree/model", value, NULL, NULL);
}

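/*
 * Find a CPU node under /proc/device-tree/cpus/ that carries a
 * clock-frequency property and return its path in buf; any such CPU
 * node will do for reading per-CPU host properties.
 */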
static int kvmppc_find_cpu_dt(char *buf, int buf_len)
{
    struct dirent *dirp;
    DIR *dp;

    dp = opendir(PROC_DEVTREE_CPU);
    if (!dp) {
        printf("Can't open directory " PROC_DEVTREE_CPU "\n");
        return -1;
    }

    buf[0] = '\0';
    while ((dirp = readdir(dp)) != NULL) {
        FILE *f;

        /* Don't accidentally read from the current and parent directories */
        if (strcmp(dirp->d_name, ".") == 0 || strcmp(dirp->d_name, "..") == 0) {
            continue;
        }

        snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU,
                 dirp->d_name);
        f = fopen(buf, "r");
        if (f) {
            snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name);
            fclose(f);
            break;
        }
        buf[0] = '\0';
    }
    closedir(dp);
    if (buf[0] == '\0') {
        printf("Unknown host!\n");
        return -1;
    }

    return 0;
}

static uint64_t kvmppc_read_int_dt(const char *filename)
{
    union {
        uint32_t v32;
        uint64_t v64;
    } u;
    FILE *f;
    int len;

    f = fopen(filename, "rb");
    if (!f) {
        return -1;
    }

    len = fread(&u, 1, sizeof(u), f);
    fclose(f);
    switch (len) {
    case 4:
        /* property is a 32-bit quantity */
        return be32_to_cpu(u.v32);
    case 8:
        return be64_to_cpu(u.v64);
    }

    return 0;
}

/*
 * Read a CPU node property from the host device tree that's a single
 * integer (32-bit or 64-bit).  Returns (uint64_t)-1 on error.
 */
static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
{
    char buf[PATH_MAX], *tmp;
    uint64_t val;

    if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
        return -1;
    }

    tmp = g_strdup_printf("%s/%s", buf, propname);
    val = kvmppc_read_int_dt(tmp);
    g_free(tmp);

    return val;
}

uint64_t kvmppc_get_clockfreq(void)
{
    return kvmppc_read_int_cpu_dt("clock-frequency");
}

static int kvmppc_get_dec_bits(void)
{
    int nr_bits = kvmppc_read_int_cpu_dt("ibm,dec-bits");

    if (nr_bits > 0) {
        return nr_bits;
    }
    return 0;
}

static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
{
    CPUState *cs = env_cpu(env);

    if (kvm_vm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
        !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
        return 0;
    }

    return 1;
}

int kvmppc_get_hasidle(CPUPPCState *env)
{
    struct kvm_ppc_pvinfo pvinfo;

    if (!kvmppc_get_pvinfo(env, &pvinfo) &&
        (pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE)) {
        return 1;
    }

    return 0;
}

int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
{
    uint32_t *hc = (uint32_t *)buf;
    struct kvm_ppc_pvinfo pvinfo;

    if (!kvmppc_get_pvinfo(env, &pvinfo)) {
        memcpy(buf, pvinfo.hcall, buf_len);
        return 0;
    }

    /*
     * Fallback to always failing hypercalls regardless of endianness:
     *
     *     tdi 0,r0,72 (becomes b .+8 in wrong endian, nop in good endian)
     *     li r3, -1
     *     b .+8       (becomes nop in wrong endian)
     *     bswap32(li r3, -1)
     */
    hc[0] = cpu_to_be32(0x08000048);
    hc[1] = cpu_to_be32(0x3860ffff);
    hc[2] = cpu_to_be32(0x48000008);
    hc[3] = cpu_to_be32(bswap32(0x3860ffff));

    return 1;
}

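/* Enable in-kernel handling of a specific sPAPR hypercall */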
static inline int kvmppc_enable_hcall(KVMState *s, target_ulong hcall)
{
    return kvm_vm_enable_cap(s, KVM_CAP_PPC_ENABLE_HCALL, 0, hcall, 1);
}

void kvmppc_enable_logical_ci_hcalls(void)
{
    /*
     * FIXME: it would be nice if we could detect the cases where
     * we're using a device which requires the in kernel
     * implementation of these hcalls, but the kernel lacks them and
     * produce a warning.
     */
    kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_LOAD);
    kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_STORE);
}

void kvmppc_enable_set_mode_hcall(void)
{
    kvmppc_enable_hcall(kvm_state, H_SET_MODE);
}

void kvmppc_enable_clear_ref_mod_hcalls(void)
{
    kvmppc_enable_hcall(kvm_state, H_CLEAR_REF);
    kvmppc_enable_hcall(kvm_state, H_CLEAR_MOD);
}

void kvmppc_enable_h_page_init(void)
{
    kvmppc_enable_hcall(kvm_state, H_PAGE_INIT);
}

void kvmppc_enable_h_rpt_invalidate(void)
{
    kvmppc_enable_hcall(kvm_state, H_RPT_INVALIDATE);
}

void kvmppc_set_papr(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    int ret;

    if (!kvm_enabled()) {
        return;
    }

    ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_PAPR, 0);
    if (ret) {
        error_report("This vCPU type or KVM version does not support PAPR");
        exit(1);
    }

    /*
     * Update the capability flag so we sync the right information
     * with kvm
     */
    cap_papr = 1;
}

int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr)
{
    return kvm_set_one_reg(CPU(cpu), KVM_REG_PPC_ARCH_COMPAT, &compat_pvr);
}

void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
{
    CPUState *cs = CPU(cpu);
    int ret;

    ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_EPR, 0, mpic_proxy);
    if (ret && mpic_proxy) {
        error_report("This KVM version does not support EPR");
        exit(1);
    }
}

bool kvmppc_get_fwnmi(void)
{
    return cap_fwnmi;
}

int kvmppc_set_fwnmi(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);

    return kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_FWNMI, 0);
}

int kvmppc_smt_threads(void)
{
    return cap_ppc_smt ? cap_ppc_smt : 1;
}

int kvmppc_set_smt_threads(int smt)
{
    int ret;

    ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_SMT, 0, smt, 0);
    if (!ret) {
        cap_ppc_smt = smt;
    }
    return ret;
}

void kvmppc_error_append_smt_possible_hint(Error *const *errp)
{
    int i;
    GString *g;
    char *s;

    assert(kvm_enabled());
    if (cap_ppc_smt_possible) {
        g = g_string_new("Available VSMT modes:");
        for (i = 63; i >= 0; i--) {
            if ((1UL << i) & cap_ppc_smt_possible) {
                g_string_append_printf(g, " %lu", (1UL << i));
            }
        }
        s = g_string_free(g, false);
        error_append_hint(errp, "%s.\n", s);
        g_free(s);
    } else {
        error_append_hint(errp,
                          "This KVM seems to be too old to support VSMT.\n");
    }
}

#ifdef TARGET_PPC64
uint64_t kvmppc_vrma_limit(unsigned int hash_shift)
{
    struct kvm_ppc_smmu_info info;
    long rampagesize, best_page_shift;
    int i;

    /*
     * Find the largest hardware supported page size that's less than
     * or equal to the (real) backing page size
     */
    kvm_get_smmu_info(&info, &error_fatal);
    rampagesize = qemu_minrampagesize();
    best_page_shift = 0;

    for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
        struct kvm_ppc_one_seg_page_size *sps = &info.sps[i];

        if (!sps->page_shift) {
            continue;
        }

        if ((sps->page_shift > best_page_shift)
            && ((1UL << sps->page_shift) <= rampagesize)) {
            best_page_shift = sps->page_shift;
        }
    }

    return 1ULL << (best_page_shift + hash_shift - 7);
}
#endif

bool kvmppc_spapr_use_multitce(void)
{
    return cap_spapr_multitce;
}

int kvmppc_spapr_enable_inkernel_multitce(void)
{
    int ret;

    ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
                            H_PUT_TCE_INDIRECT, 1);
    if (!ret) {
        ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
                                H_STUFF_TCE, 1);
    }

    return ret;
}

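/*
 * Create an in-kernel TCE (guest IOMMU translation) table for the
 * given LIOBN and mmap() it into QEMU so both sides share the DMA
 * window; returns the mapped table and passes its fd back via *pfd.
 */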
void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
                              uint64_t bus_offset, uint32_t nb_table,
                              int *pfd, bool need_vfio)
{
    long len;
    int fd;
    void *table;

    /*
     * Must set fd to -1 so we don't try to munmap when called for
     * destroying the table, which the upper layers -will- do
     */
    *pfd = -1;
    if (!cap_spapr_tce || (need_vfio && !cap_spapr_vfio)) {
        return NULL;
    }

    if (cap_spapr_tce_64) {
        struct kvm_create_spapr_tce_64 args = {
            .liobn = liobn,
            .page_shift = page_shift,
            .offset = bus_offset >> page_shift,
            .size = nb_table,
            .flags = 0
        };
        fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE_64, &args);
        if (fd < 0) {
            fprintf(stderr,
                    "KVM: Failed to create TCE64 table for liobn 0x%x\n",
                    liobn);
            return NULL;
        }
    } else if (cap_spapr_tce) {
        uint64_t window_size = (uint64_t) nb_table << page_shift;
        struct kvm_create_spapr_tce args = {
            .liobn = liobn,
            .window_size = window_size,
        };
        if ((window_size != args.window_size) || bus_offset) {
            return NULL;
        }
        fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args);
        if (fd < 0) {
            fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n",
                    liobn);
            return NULL;
        }
    } else {
        return NULL;
    }

    len = nb_table * sizeof(uint64_t);
    /* FIXME: round this up to page size */

    table = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (table == MAP_FAILED) {
        fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
                liobn);
        close(fd);
        return NULL;
    }

    *pfd = fd;
    return table;
}

int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t nb_table)
{
    long len;

    if (fd < 0) {
        return -1;
    }

    len = nb_table * sizeof(uint64_t);
    if ((munmap(table, len) < 0) ||
        (close(fd) < 0)) {
        fprintf(stderr, "KVM: Unexpected error removing TCE table: %s\n",
                strerror(errno));
        /* Leak the table */
    }

    return 0;
}

int kvmppc_reset_htab(int shift_hint)
{
    uint32_t shift = shift_hint;

    if (!kvm_enabled()) {
        /* Full emulation, tell caller to allocate htab itself */
        return 0;
    }
    if (kvm_vm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) {
        int ret;
        ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
        if (ret == -ENOTTY) {
            /*
             * At least some versions of PR KVM advertise the
             * capability, but don't implement the ioctl().  Oops.
             * Return 0 so that we allocate the htab in qemu, as is
             * correct for PR.
             */
            return 0;
        } else if (ret < 0) {
            return ret;
        }
        return shift;
    }

    /*
     * We have a kernel that predates the htab reset calls.  For PR
     * KVM, we need to allocate the htab ourselves; for an HV KVM of
     * this era, it has already allocated a 16MB fixed-size hash
     * table.
     */
    if (kvmppc_is_pr(kvm_state)) {
        /* PR - tell caller to allocate htab */
        return 0;
    } else {
        /* HV - assume 16MB kernel allocated htab */
        return 24;
    }
}
2332
2333static inline uint32_t mfpvr(void)
2334{
2335 uint32_t pvr;
2336
2337 asm ("mfpvr %0"
2338 : "=r"(pvr));
2339 return pvr;
2340}
2341
2342static void alter_insns(uint64_t *word, uint64_t flags, bool on)
2343{
2344 if (on) {
2345 *word |= flags;
2346 } else {
2347 *word &= ~flags;
2348 }
2349}
2350
static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
    uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size");
    uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size");

    /* Now fix up the class with information we can query from the host */
    pcc->pvr = mfpvr();

    alter_insns(&pcc->insns_flags, PPC_ALTIVEC,
                qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_ALTIVEC);
    alter_insns(&pcc->insns_flags2, PPC2_VSX,
                qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_VSX);
    alter_insns(&pcc->insns_flags2, PPC2_DFP,
                qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_DFP);

    /* -1 indicates the device-tree property could not be read */
    if (dcache_size != -1) {
        pcc->l1_dcache_size = dcache_size;
    }

    if (icache_size != -1) {
        pcc->l1_icache_size = icache_size;
    }

#if defined(TARGET_PPC64)
    pcc->radix_page_info = kvm_get_radix_page_info();

    if ((pcc->pvr & 0xffffff00) == CPU_POWERPC_POWER9_DD1) {
        /*
         * POWER9 DD1 is not fully ISA v3.00 compliant, so don't
         * advertise any architected compatibility modes for it.
         */
        pcc->pcr_supported &= ~(PCR_COMPAT_3_00 | PCR_COMPAT_2_07
                                | PCR_COMPAT_2_06 | PCR_COMPAT_2_05);
    }
#endif
}

bool kvmppc_has_cap_epr(void)
{
    return cap_epr;
}

bool kvmppc_has_cap_fixup_hcalls(void)
{
    return cap_fixup_hcalls;
}

bool kvmppc_has_cap_htm(void)
{
    return cap_htm;
}

bool kvmppc_has_cap_mmu_radix(void)
{
    return cap_mmu_radix;
}

bool kvmppc_has_cap_mmu_hash_v3(void)
{
    return cap_mmu_hash_v3;
}

static bool kvmppc_power8_host(void)
{
    bool ret = false;
#ifdef TARGET_PPC64
    {
        uint32_t base_pvr = CPU_POWERPC_POWER_SERVER_MASK & mfpvr();
        ret = (base_pvr == CPU_POWERPC_POWER8E_BASE) ||
              (base_pvr == CPU_POWERPC_POWER8NVL_BASE) ||
              (base_pvr == CPU_POWERPC_POWER8_BASE);
    }
#endif
    return ret;
}

static int parse_cap_ppc_safe_cache(struct kvm_ppc_cpu_char c)
{
    /* Only hosts other than POWER8 need the thread-priv characteristic */
    bool l1d_thread_priv_req = !kvmppc_power8_host();

    if (~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_L1D_FLUSH_PR) {
        return 2;   /* fixed: no L1D flush on privilege change needed */
    } else if ((!l1d_thread_priv_req ||
                c.character & c.character_mask & H_CPU_CHAR_L1D_THREAD_PRIV) &&
               (c.character & c.character_mask
                & (H_CPU_CHAR_L1D_FLUSH_ORI30 | H_CPU_CHAR_L1D_FLUSH_TRIG2))) {
        return 1;   /* workaround: a usable flush instruction exists */
    }

    return 0;       /* broken: no mitigation available */
}
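
/*
 * Note on the bit tests in the parse_cap_* helpers: the *_mask words
 * say which bits the hypervisor actually reported; value bits are only
 * meaningful under the mask. Hence:
 *
 *     c.character  & c.character_mask & FLAG   // FLAG reported as set
 *     ~c.behaviour & c.behaviour_mask & FLAG   // FLAG reported as clear
 *
 * For example, behaviour = 0b00, behaviour_mask = 0b10, FLAG = 0b10
 * makes the second expression 0b10 (non-zero): the bit was reported,
 * and reported as clear.
 */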

static int parse_cap_ppc_safe_bounds_check(struct kvm_ppc_cpu_char c)
{
    if (~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR) {
        return 2;
    } else if (c.character & c.character_mask & H_CPU_CHAR_SPEC_BAR_ORI31) {
        return 1;
    }

    return 0;
}

static int parse_cap_ppc_safe_indirect_branch(struct kvm_ppc_cpu_char c)
{
    if ((~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_FLUSH_COUNT_CACHE) &&
        (~c.character & c.character_mask & H_CPU_CHAR_CACHE_COUNT_DIS) &&
        (~c.character & c.character_mask & H_CPU_CHAR_BCCTRL_SERIALISED)) {
        return SPAPR_CAP_FIXED_NA;
    } else if (c.behaviour & c.behaviour_mask & H_CPU_BEHAV_FLUSH_COUNT_CACHE) {
        return SPAPR_CAP_WORKAROUND;
    } else if (c.character & c.character_mask & H_CPU_CHAR_CACHE_COUNT_DIS) {
        return SPAPR_CAP_FIXED_CCD;
    } else if (c.character & c.character_mask & H_CPU_CHAR_BCCTRL_SERIALISED) {
        return SPAPR_CAP_FIXED_IBS;
    }

    return 0;
}

static int parse_cap_ppc_count_cache_flush_assist(struct kvm_ppc_cpu_char c)
{
    if (c.character & c.character_mask & H_CPU_CHAR_BCCTR_FLUSH_ASSIST) {
        return 1;
    }
    return 0;
}

bool kvmppc_has_cap_xive(void)
{
    return cap_xive;
}

static void kvmppc_get_cpu_characteristics(KVMState *s)
{
    struct kvm_ppc_cpu_char c;
    int ret;

    /* Assume broken until the hypervisor tells us otherwise */
    cap_ppc_safe_cache = 0;
    cap_ppc_safe_bounds_check = 0;
    cap_ppc_safe_indirect_branch = 0;

    ret = kvm_vm_check_extension(s, KVM_CAP_PPC_GET_CPU_CHAR);
    if (!ret) {
        return;
    }
    ret = kvm_vm_ioctl(s, KVM_PPC_GET_CPU_CHAR, &c);
    if (ret < 0) {
        return;
    }

    cap_ppc_safe_cache = parse_cap_ppc_safe_cache(c);
    cap_ppc_safe_bounds_check = parse_cap_ppc_safe_bounds_check(c);
    cap_ppc_safe_indirect_branch = parse_cap_ppc_safe_indirect_branch(c);
    cap_ppc_count_cache_flush_assist =
        parse_cap_ppc_count_cache_flush_assist(c);
}

int kvmppc_get_cap_safe_cache(void)
{
    return cap_ppc_safe_cache;
}

int kvmppc_get_cap_safe_bounds_check(void)
{
    return cap_ppc_safe_bounds_check;
}

int kvmppc_get_cap_safe_indirect_branch(void)
{
    return cap_ppc_safe_indirect_branch;
}

int kvmppc_get_cap_count_cache_flush_assist(void)
{
    return cap_ppc_count_cache_flush_assist;
}

bool kvmppc_has_cap_nested_kvm_hv(void)
{
    return !!cap_ppc_nested_kvm_hv;
}

int kvmppc_set_cap_nested_kvm_hv(int enable)
{
    return kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_NESTED_HV, 0, enable);
}

bool kvmppc_has_cap_spapr_vfio(void)
{
    return cap_spapr_vfio;
}

int kvmppc_get_cap_large_decr(void)
{
    return cap_large_decr;
}

int kvmppc_enable_cap_large_decr(PowerPCCPU *cpu, int enable)
{
    CPUState *cs = CPU(cpu);
    uint64_t lpcr = 0;

    kvm_get_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);
    /* Do we need to modify the LPCR? */
    if (!!(lpcr & LPCR_LD) != !!enable) {
        if (enable) {
            lpcr |= LPCR_LD;
        } else {
            lpcr &= ~LPCR_LD;
        }
        kvm_set_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);
        kvm_get_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);

        /* Read back to check whether the change actually took effect */
        if (!!(lpcr & LPCR_LD) != !!enable) {
            return -1;
        }
    }

    return 0;
}
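
/*
 * Usage sketch (hypothetical caller): the set-then-get round trip above
 * guards against KVM silently refusing to change LPCR_LD, e.g. on hosts
 * without a large decrementer, so callers only need the return value:
 *
 *     if (kvmppc_enable_cap_large_decr(cpu, 1) < 0) {
 *         // host can't provide a large decrementer; keep the cap off
 *     }
 */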

int kvmppc_has_cap_rpt_invalidate(void)
{
    return cap_rpt_invalidate;
}

bool kvmppc_supports_ail_3(void)
{
    return cap_ail_mode_3;
}

PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
{
    uint32_t host_pvr = mfpvr();
    PowerPCCPUClass *pvr_pcc;

    pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
    if (pvr_pcc == NULL) {
        pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr);
    }

    return pvr_pcc;
}

static void pseries_machine_class_fixup(ObjectClass *oc, void *opaque)
{
    MachineClass *mc = MACHINE_CLASS(oc);

    mc->default_cpu_type = TYPE_HOST_POWERPC_CPU;
}

static int kvm_ppc_register_host_cpu_type(void)
{
    TypeInfo type_info = {
        .name = TYPE_HOST_POWERPC_CPU,
        .class_init = kvmppc_host_cpu_class_init,
    };
    PowerPCCPUClass *pvr_pcc;
    ObjectClass *oc;
    DeviceClass *dc;
    int i;

    pvr_pcc = kvm_ppc_get_host_cpu_class();
    if (pvr_pcc == NULL) {
        return -1;
    }
    type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
    type_register(&type_info);

    /* Make the "host" CPU the default for every pseries machine class */
    object_class_foreach(pseries_machine_class_fixup, TYPE_SPAPR_MACHINE,
                         false, NULL);

    oc = object_class_by_name(type_info.name);
    g_assert(oc);

    /*
     * Update generic CPU family class alias (e.g. on a POWER8NVL host,
     * we want "POWER8" to be a "family" alias that points to the current
     * host CPU type, too)
     */
    dc = DEVICE_CLASS(ppc_cpu_get_family_class(pvr_pcc));
    for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
        if (strcasecmp(ppc_cpu_aliases[i].alias, dc->desc) == 0) {
            char *suffix;

            ppc_cpu_aliases[i].model = g_strdup(object_class_get_name(oc));
            suffix = strstr(ppc_cpu_aliases[i].model, POWERPC_CPU_TYPE_SUFFIX);
            if (suffix) {
                *suffix = 0;
            }
            break;
        }
    }

    return 0;
}

int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function)
{
    struct kvm_rtas_token_args args = {
        .token = token,
    };

    if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_RTAS)) {
        return -ENOENT;
    }

    /* args.name is zero-initialised above, so the copy stays NUL-terminated */
    strncpy(args.name, function, sizeof(args.name) - 1);

    return kvm_vm_ioctl(kvm_state, KVM_PPC_RTAS_DEFINE_TOKEN, &args);
}
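
/*
 * Usage sketch (hypothetical caller; the token value is illustrative):
 * RTAS setup code registers each call it wants serviced in-kernel:
 *
 *     if (kvmppc_define_rtas_kernel_token(token, "ibm,set-xive") < 0) {
 *         // the call remains handled by QEMU's own RTAS code
 *     }
 */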

int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp)
{
    struct kvm_get_htab_fd s = {
        .flags = write ? KVM_GET_HTAB_WRITE : 0,
        .start_index = index,
    };
    int ret;

    if (!cap_htab_fd) {
        error_setg(errp, "KVM version doesn't support %s the HPT",
                   write ? "writing" : "reading");
        return -ENOTSUP;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &s);
    if (ret < 0) {
        error_setg(errp, "Unable to open fd for %s HPT %s KVM: %s",
                   write ? "writing" : "reading", write ? "to" : "from",
                   strerror(errno));
        return -errno;
    }

    return ret;
}

int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
{
    int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    uint8_t buf[bufsize];
    ssize_t rc;

    do {
        rc = read(fd, buf, bufsize);
        if (rc < 0) {
            fprintf(stderr, "Error reading data from KVM HTAB fd: %s\n",
                    strerror(errno));
            return rc;
        } else if (rc) {
            uint8_t *buffer = buf;
            ssize_t n = rc;
            while (n) {
                struct kvm_get_htab_header *head =
                    (struct kvm_get_htab_header *) buffer;
                size_t chunksize = sizeof(*head) +
                    HASH_PTE_SIZE_64 * head->n_valid;

                qemu_put_be32(f, head->index);
                qemu_put_be16(f, head->n_valid);
                qemu_put_be16(f, head->n_invalid);
                qemu_put_buffer(f, (void *)(head + 1),
                                HASH_PTE_SIZE_64 * head->n_valid);

                buffer += chunksize;
                n -= chunksize;
            }
        }
    } while ((rc != 0)
             && ((max_ns < 0) ||
                 ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime)
                  < max_ns)));

    /* 1 = whole HPT drained (EOF), 0 = stopped because time ran out */
    return (rc == 0) ? 1 : 0;
}
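
/*
 * The stream written above is a sequence of records, each laid out as:
 *
 *     be32 index       first HPTE index covered by this record
 *     be16 n_valid     number of valid HPTEs that follow
 *     be16 n_invalid   number of HPTEs to clear after those
 *     n_valid * HASH_PTE_SIZE_64 bytes of raw HPTE data
 *
 * kvmppc_load_htab_chunk() below rebuilds one such record for the
 * kernel; the header fields arrive as its arguments.
 */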

int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
                           uint16_t n_valid, uint16_t n_invalid, Error **errp)
{
    struct kvm_get_htab_header *buf;
    size_t chunksize = sizeof(*buf) + n_valid * HASH_PTE_SIZE_64;
    ssize_t rc;

    buf = alloca(chunksize);
    buf->index = index;
    buf->n_valid = n_valid;
    buf->n_invalid = n_invalid;

    qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64 * n_valid);

    rc = write(fd, buf, chunksize);
    if (rc < 0) {
        error_setg_errno(errp, errno, "Error writing the KVM hash table");
        return -errno;
    }
    if (rc != chunksize) {
        /* We should never get a short write on a single chunk */
        error_setg(errp, "Short write while restoring the KVM hash table");
        return -ENOSPC;
    }
    return 0;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
{
    return true;
}

void kvm_arch_init_irq_routing(KVMState *s)
{
}

void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n)
{
    int fd, rc;
    int i;

    fd = kvmppc_get_htab_fd(false, ptex, &error_abort);

    i = 0;
    while (i < n) {
        struct kvm_get_htab_header *hdr;
        int m = n < HPTES_PER_GROUP ? n : HPTES_PER_GROUP;
        char buf[sizeof(*hdr) + m * HASH_PTE_SIZE_64];

        rc = read(fd, buf, sizeof(buf));
        if (rc < 0) {
            hw_error("kvmppc_read_hptes: Unable to read HPTEs");
        }

        hdr = (struct kvm_get_htab_header *)buf;
        while ((i < n) && ((char *)hdr < (buf + rc))) {
            int invalid = hdr->n_invalid, valid = hdr->n_valid;

            if (hdr->index != (ptex + i)) {
                hw_error("kvmppc_read_hptes: Unexpected HPTE index %"PRIu32
                         " != (%"HWADDR_PRIu" + %d)", hdr->index, ptex, i);
            }

            if (n - i < valid) {
                valid = n - i;
            }
            memcpy(hptes + i, hdr + 1, HASH_PTE_SIZE_64 * valid);
            i += valid;

            if ((n - i) < invalid) {
                invalid = n - i;
            }
            /* Invalid HPTEs are returned to the caller as zeroes */
            memset(hptes + i, 0, invalid * HASH_PTE_SIZE_64);
            i += invalid;

            hdr = (struct kvm_get_htab_header *)
                ((char *)(hdr + 1) + HASH_PTE_SIZE_64 * hdr->n_valid);
        }
    }

    close(fd);
}

void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1)
{
    int fd, rc;
    struct {
        struct kvm_get_htab_header hdr;
        uint64_t pte0;
        uint64_t pte1;
    } buf;

    /* The start index is irrelevant here: each chunk carries its own */
    fd = kvmppc_get_htab_fd(true, 0, &error_abort);

    buf.hdr.n_valid = 1;
    buf.hdr.n_invalid = 0;
    buf.hdr.index = ptex;
    buf.pte0 = cpu_to_be64(pte0);
    buf.pte1 = cpu_to_be64(pte1);

    rc = write(fd, &buf, sizeof(buf));
    if (rc != sizeof(buf)) {
        hw_error("kvmppc_write_hpte: Unable to update KVM HPT");
    }
    close(fd);
}
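
/*
 * Usage sketch (hypothetical caller; `ptex`, `pte0` and `pte1` are
 * illustrative): together these helpers give the hash-MMU emulation
 * direct access to individual entries of the in-kernel HPT:
 *
 *     ppc_hash_pte64_t pte;
 *     kvmppc_read_hptes(&pte, ptex, 1);       // fetch one HPTE
 *     kvmppc_write_hpte(ptex, pte0, pte1);    // update it in place
 */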

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    return data & 0xffff;
}

#if defined(TARGET_PPC64)
int kvm_handle_nmi(PowerPCCPU *cpu, struct kvm_run *run)
{
    uint16_t flags = run->flags & KVM_RUN_PPC_NMI_DISP_MASK;

    cpu_synchronize_state(CPU(cpu));

    spapr_mce_req_event(cpu, flags == KVM_RUN_PPC_NMI_DISP_FULLY_RECOV);

    return 0;
}
#endif

int kvmppc_enable_hwrng(void)
{
    if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_PPC_HWRNG)) {
        return -1;
    }

    return kvmppc_enable_hcall(kvm_state, H_RANDOM);
}

void kvmppc_check_papr_resize_hpt(Error **errp)
{
    if (!kvm_enabled()) {
        return; /* No KVM, we're good */
    }

    if (cap_resize_hpt) {
        return; /* Kernel has explicit support */
    }

    /* Otherwise, the kernel-implicit resizing on PR KVM is sufficient */
    if (kvmppc_is_pr(kvm_state)) {
        return;
    }

    error_setg(errp,
               "Hash page table resizing not available with this KVM version");
}

int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu, target_ulong flags, int shift)
{
    CPUState *cs = CPU(cpu);
    struct kvm_ppc_resize_hpt rhpt = {
        .flags = flags,
        .shift = shift,
    };

    if (!cap_resize_hpt) {
        return -ENOSYS;
    }

    return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_PREPARE, &rhpt);
}

int kvmppc_resize_hpt_commit(PowerPCCPU *cpu, target_ulong flags, int shift)
{
    CPUState *cs = CPU(cpu);
    struct kvm_ppc_resize_hpt rhpt = {
        .flags = flags,
        .shift = shift,
    };

    if (!cap_resize_hpt) {
        return -ENOSYS;
    }

    return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_COMMIT, &rhpt);
}
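
/*
 * Usage sketch (hypothetical caller): HPT resizing is a two-phase
 * protocol, mapping onto the pair above roughly as follows:
 *
 *     rc = kvmppc_resize_hpt_prepare(cpu, flags, shift);
 *     // rc > 0: preparation still in progress, retry later
 *     // rc == 0: the new HPT is ready, move to the commit phase
 *     if (rc == 0) {
 *         rc = kvmppc_resize_hpt_commit(cpu, flags, shift);
 *     }
 */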

/*
 * Detect a post-migration scenario in which a guest running under
 * KVM-HV would freeze in cpu_post_load, because the guest kernel can't
 * cope with a PVR value in KVM_SET_SREGS other than the real host PVR,
 * even when pvr_match() accepts it.
 *
 * If we don't have cap_ppc_pvr_compat and we're not running PR KVM
 * (so, we're HV), return true; the workaround itself is applied in
 * cpu_post_load. The ordering matters: KVM PR is only checked as a
 * fallback when the capability is absent, to avoid querying a running
 * KVM HV for capabilities, which would starve the guest.
 */
bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);

    if (!kvm_enabled()) {
        return false;
    }

    if (cap_ppc_pvr_compat) {
        return false;
    }

    return !kvmppc_is_pr(cs->kvm_state);
}

void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu, unsigned int online)
{
    CPUState *cs = CPU(cpu);

    if (kvm_enabled()) {
        kvm_set_one_reg(cs, KVM_REG_PPC_ONLINE, &online);
    }
}

void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset)
{
    CPUState *cs = CPU(cpu);

    if (kvm_enabled()) {
        kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &tb_offset);
    }
}

bool kvm_arch_cpu_check_are_resettable(void)
{
    return true;
}

void kvm_arch_accel_class_init(ObjectClass *oc)
{
}