// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif
#include <asm/ultravisor.h>

#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_runnable(vcpu);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (kvm_request_pending(vcpu)) {
			/* Make sure we process requests preemptable */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

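/*
 * Handle a KVM/ePAPR paravirtual hypercall from the guest: the
 * hypercall number arrives in GPR11 and up to four arguments in
 * GPR3-GPR6.  The status is handed back to the caller and a second
 * return value is written to GPR4 here.
 */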
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

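/*
 * Check that the vcpu as configured by userspace is something this
 * host can actually run; the verdict is cached in vcpu->arch.sane.
 */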
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

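/*
 * Emulate a guest load/store that faulted into MMIO space and map the
 * emulation result onto a RESUME_* action for the run loop.
 */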
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

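/*
 * Store 'size' bytes from 'ptr' at the guest effective address
 * '*eaddr', handling the shared magic page specially and falling back
 * to MMIO emulation when the address has no memory backing.
 */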
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r = -EINVAL;

	vcpu->stat.st++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
		r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
							    size);

	if ((!r) || (r == -EAGAIN))
		return r;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc = -EINVAL;

	vcpu->stat.ld++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
		rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
							      size);

	if ((!rc) || (rc == -EAGAIN))
		return rc;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void *opaque)
{
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

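/*
 * Report which optional capabilities this KVM supports.  Several
 * answers depend on whether the queried VM (or, with no VM yet, the
 * loaded module) runs in HV or PR mode.
 */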
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_SET_GUEST_DEBUG:
		r = 1;
		break;
	case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
		r = 1;
		break;
	case KVM_CAP_SPAPR_TCE_VFIO:
		r = !!cpu_has_feature(CPU_FTR_HVMODE);
		break;
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
	case KVM_CAP_PPC_GET_CPU_CHAR:
		r = 1;
		break;
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE:
		/*
		 * We need XIVE to be enabled on the platform (implies
		 * a POWER9 processor) and the PowerNV platform, as
		 * nested is not yet supported.
		 */
		r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
			kvmppc_xive_native_supported();
		break;
#endif

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (kvm) {
			if (kvm->arch.emul_smt_mode > 1)
				r = kvm->arch.emul_smt_mode;
			else
				r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		r = 1;
		if (hv_enabled) {
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				r = ((threads_per_subcore << 1) - 1);
			else
				/* P9 can emulate 1, 2, 4 or 8 threads */
				r = 8 | 4 | 2 | 1;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) &&
		       cpu_has_feature(CPU_FTR_HVMODE));
		break;
	case KVM_CAP_PPC_NESTED_HV:
		r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
		       !kvmppc_hv_ops->enable_nested(NULL));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_ID;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		r = !!hv_enabled;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = hv_enabled;
		break;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_CAP_PPC_HTM:
		r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
		     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
		break;
#endif
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
	case KVM_CAP_PPC_SECURE_GUEST:
		r = hv_enabled && kvmppc_hv_ops->enable_svm &&
			!kvmppc_hv_ops->enable_svm(NULL);
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvmppc_core_free_memslot(kvm, slot);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem, change);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new, change);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

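/*
 * The guest decrementer is emulated with an hrtimer; when it fires we
 * inject the decrementer exception via kvmppc_decrementer_func().
 */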
static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = get_tb();

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	err = kvmppc_subarch_vcpu_init(vcpu);
	if (err)
		return err;

	err = kvmppc_core_vcpu_create(vcpu);
	if (err)
		goto out_vcpu_uninit;

	vcpu->arch.wqp = &vcpu->wq;
	kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id);
	return 0;

out_vcpu_uninit:
	kvmppc_subarch_vcpu_uninit(vcpu);
	return err;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		if (xics_on_xive())
			kvmppc_xive_cleanup_vcpu(vcpu);
		else
			kvmppc_xics_free_icp(vcpu);
		break;
	case KVMPPC_IRQ_XIVE:
		kvmppc_xive_native_cleanup_vcpu(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);

	kvmppc_subarch_vcpu_uninit(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

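/*
 * Helpers for completing MMIO accesses into VSX registers.  A 128-bit
 * VSR holds two doublewords or four words; the offset helpers below
 * translate an architected element index into the host-endian array
 * index used by union kvmppc_one_reg (returning -1 when the index is
 * out of range).
 */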
#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_dword_offset(int index)
{
	int offset;

	if ((index != 0) && (index != 1))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 1 - index;
#endif

	return offset;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
	int offset;

	if ((index > 3) || (index < 0))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 3 - index;
#endif
	return offset;
}

static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
					u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[offset] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
	}
}

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
					     u64 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[0] = gpr;
		val.vsxval[1] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
	}
}

static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
					    u32 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		val.vsx32val[2] = gpr;
		val.vsx32val[3] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
		VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
	}
}

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
				       u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	int dword_offset, word_offset;

	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsx32val[offset] = gpr32;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		dword_offset = offset / 2;
		word_offset = offset % 2;
		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
		val.vsx32val[word_offset] = gpr32;
		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
	}
}
#endif /* CONFIG_VSX */

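/*
 * VMX (Altivec) counterparts of the VSX helpers.  Here the element
 * order depends on whether guest accesses need byteswapping, so the
 * offset calculation takes the vcpu and reverses the index when
 * kvmppc_need_byteswap() is true.
 */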
#ifdef CONFIG_ALTIVEC
static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
		int index, int element_size)
{
	int offset;
	int elts = sizeof(vector128)/element_size;

	if ((index < 0) || (index >= elts))
		return -1;

	if (kvmppc_need_byteswap(vcpu))
		offset = elts - index - 1;
	else
		offset = index;

	return offset;
}

static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
}

static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
}

static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
}

static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
}


static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_dword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsxval[offset] = gpr;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_word_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx32val[offset] = gpr32;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
	u16 gpr16)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_hword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx16val[offset] = gpr16;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
	u8 gpr8)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_byte_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx8val[offset] = gpr8;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}
#endif /* CONFIG_ALTIVEC */

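/*
 * Single-/double-precision conversion, done by bouncing the value
 * through an FPU register so that lfs/stfd (and lfd/stfs) perform the
 * format conversion in hardware.  fr0 is clobbered, hence the
 * preempt-disabled enable_kernel_fp() section.
 */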
#ifdef CONFIG_PPC_FPU
static inline u64 sp_to_dp(u32 fprs)
{
	u64 fprd;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
	     : "fr0");
	preempt_enable();
	return fprd;
}

static inline u32 dp_to_sp(u64 fprd)
{
	u32 fprs;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
	     : "fr0");
	preempt_enable();
	return fprs;
}

#else
#define sp_to_dp(x)	(x)
#define dp_to_sp(x)	(x)
#endif /* CONFIG_PPC_FPU */

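/*
 * Complete a pending MMIO load: extract the value from run->mmio.data,
 * undo any host/guest endianness mismatch, apply single-precision or
 * sign extension as requested, and route the result into the GPR,
 * FPR, QPR, VSX or VMX target recorded in vcpu->arch.io_gpr.
 */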
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	/* conversion between single and double precision */
	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
		gpr = sp_to_dp(gpr);

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);

		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
#ifdef CONFIG_VSX
	case KVM_MMIO_REG_VSX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
			kvmppc_set_vsr_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
			kvmppc_set_vsr_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
			 KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
			kvmppc_set_vsr_dword_dump(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
			 KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
			kvmppc_set_vsr_word_dump(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case KVM_MMIO_REG_VMX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
			kvmppc_set_vmx_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
			kvmppc_set_vmx_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_HWORD)
			kvmppc_set_vmx_hword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_BYTE)
			kvmppc_set_vmx_byte(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_MMIO_REG_NESTED_GPR:
		if (kvmppc_need_byteswap(vcpu))
			gpr = swab64(gpr);
		kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
				     sizeof(gpr));
		break;
#endif
	default:
		BUG();
	}
}

static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}

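/*
 * VSX loads can require several element copies (mmio_vsx_copy_nums);
 * each loop iteration emulates one element and advances both the
 * guest physical address and the target element offset.
 */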
#ifdef CONFIG_VSX
int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned int rt, unsigned int bytes,
			   int is_default_endian, int mmio_sign_extend)
{
	enum emulation_result emulated = EMULATE_DONE;

	/* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
			is_default_endian, mmio_sign_extend);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}
	return emulated;
}
#endif /* CONFIG_VSX */

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
		val = dp_to_sp(val);

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
{
	u32 dword_offset, word_offset;
	union kvmppc_one_reg reg;
	int vsx_offset = 0;
	int copy_type = vcpu->arch.mmio_copy_type;
	int result = 0;

	switch (copy_type) {
	case KVMPPC_VSX_COPY_DWORD:
		vsx_offset =
			kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (rs < 32) {
			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
			*val = reg.vsxval[vsx_offset];
		}
		break;

	case KVMPPC_VSX_COPY_WORD:
		vsx_offset =
			kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (rs < 32) {
			dword_offset = vsx_offset / 2;
			word_offset = vsx_offset % 2;
			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
			*val = reg.vsx32val[word_offset];
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
			*val = reg.vsx32val[vsx_offset];
		}
		break;

	default:
		result = -1;
		break;
	}

	return result;
}

int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    int rs, unsigned int bytes, int is_default_endian)
{
	u64 val;
	enum emulation_result emulated = EMULATE_DONE;

	vcpu->arch.io_gpr = rs;

	/* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
			return EMULATE_FAIL;

		emulated = kvmppc_handle_store(run, vcpu,
					       val, bytes, is_default_endian);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}

	return emulated;
}

static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
					     struct kvm_run *run)
{
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
			 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
	} else {
		emulated = kvmppc_handle_vsx_store(run, vcpu,
			 vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_VSX */

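/*
 * VMX (Altivec) MMIO emulation follows the same repeat pattern as the
 * VSX code above, with at most two element copies per instruction.
 */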
#ifdef CONFIG_ALTIVEC
int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes, int is_default_endian)
{
	enum emulation_result emulated = EMULATE_DONE;

	/* A VMX access is emulated in at most 2 element copies; note the
	   original checked the (wrong) VSX counter here */
	if (vcpu->arch.mmio_vmx_copy_nums > 2)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
				is_default_endian, 0);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
		vcpu->arch.mmio_vmx_offset++;
	}

	return emulated;
}

int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsxval[vmx_offset];

	return result;
}

int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx32val[vmx_offset];

	return result;
}

int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx16val[vmx_offset];

	return result;
}

int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx8val[vmx_offset];

	return result;
}

int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian)
{
	u64 val = 0;
	unsigned int index = rs & KVM_MMIO_REG_MASK;
	enum emulation_result emulated = EMULATE_DONE;

	/* as in kvmppc_handle_vmx_load(), guard the VMX counter, not VSX */
	if (vcpu->arch.mmio_vmx_copy_nums > 2)
		return EMULATE_FAIL;

	vcpu->arch.io_gpr = rs;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		switch (vcpu->arch.mmio_copy_type) {
		case KVMPPC_VMX_COPY_DWORD:
			if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;

			break;
		case KVMPPC_VMX_COPY_WORD:
			if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_HWORD:
			if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_BYTE:
			if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		default:
			return EMULATE_FAIL;
		}

		emulated = kvmppc_handle_store(run, vcpu, val, bytes,
					       is_default_endian);
		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
		vcpu->arch.mmio_vmx_offset++;
	}

	return emulated;
}

static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
		struct kvm_run *run)
{
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vmx_load(run, vcpu,
				vcpu->arch.io_gpr, run->mmio.len, 1);
	} else {
		emulated = kvmppc_handle_vmx_store(run, vcpu,
				vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_ALTIVEC */

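/*
 * ONE_REG accessors: one_reg_size() decodes the transfer size from the
 * register id.  Registers the subarch code does not handle fall back
 * to the Altivec cases below.
 */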
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;

	vcpu_load(vcpu);

	if (vcpu->mmio_needed) {
		vcpu->mmio_needed = 0;
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
#ifdef CONFIG_VSX
		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			vcpu->arch.mmio_vsx_copy_nums--;
			vcpu->arch.mmio_vsx_offset++;
		}

		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
#ifdef CONFIG_ALTIVEC
		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			vcpu->arch.mmio_vmx_copy_nums--;
			vcpu->arch.mmio_vmx_offset++;
		}

		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	kvm_sigset_activate(vcpu);

	if (run->immediate_exit)
		r = -EINTR;
	else
		r = kvmppc_vcpu_run(run, vcpu);

	kvm_sigset_deactivate(vcpu);

#ifdef CONFIG_ALTIVEC
out:
#endif
	vcpu_put(vcpu);
	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev) {
			if (xics_on_xive())
				r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
			else
				r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
		}

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -ENXIO;
		if (!xive_enabled()) {
			/* don't leak the file reference on this error path */
			fdput(f);
			break;
		}

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
							    cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XIVE */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(vcpu->kvm))
			break;
		r = 0;
		vcpu->kvm->arch.fwnmi_enabled = true;
		break;
#endif
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
	if (kvm->arch.mpic)
		return true;
#endif
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics || kvm->arch.xive)
		return true;
#endif
	return false;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}
	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		/* copy the argument in before vcpu_load() so the -EFAULT
		   path cannot bail out with the vcpu still loaded */
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		vcpu_load(vcpu);
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		vcpu_put(vcpu);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		/* same ordering as KVM_ENABLE_CAP above */
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		vcpu_load(vcpu);
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		vcpu_put(vcpu);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

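/*
 * Tell userspace which instruction sequence a guest should use for a
 * KVM paravirtual hypercall: "sc 1" on BookE-HV, otherwise loading
 * KVM_SC_MAGIC_R0 into r0 followed by a plain "sc".
 */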
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}


int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
	case KVM_CAP_PPC_SMT: {
		unsigned long mode = cap->args[0];
		unsigned long flags = cap->args[1];

		r = -EINVAL;
		if (kvm->arch.kvm_ops->set_smt_mode)
			r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
		break;
	}

	case KVM_CAP_PPC_NESTED_HV:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(kvm) ||
		    !kvm->arch.kvm_ops->enable_nested)
			break;
		r = kvm->arch.kvm_ops->enable_nested(kvm);
		break;
#endif
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
	case KVM_CAP_PPC_SECURE_GUEST:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
			break;
		r = kvm->arch.kvm_ops->enable_svm(kvm);
		break;
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * These functions check whether the underlying hardware is safe
 * against attacks based on observing the effects of speculatively
 * executed instructions, and whether it supplies instructions for
 * use in workarounds.  The information comes from firmware, either
 * via the device tree on powernv platforms or from an hcall on
 * pseries platforms.
 */
#ifdef CONFIG_PPC_PSERIES
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct h_cpu_char_result c;
	unsigned long rc;

	if (!machine_is(pseries))
		return -ENOTTY;

	rc = plpar_get_cpu_characteristics(&c);
	if (rc == H_SUCCESS) {
		cp->character = c.character;
		cp->behaviour = c.behaviour;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
	}
	return 0;
}
#else
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	return -ENOTTY;
}
#endif

static inline bool have_fw_feat(struct device_node *fw_features,
				const char *state, const char *name)
{
	struct device_node *np;
	bool r = false;

	np = of_get_child_by_name(fw_features, name);
	if (np) {
		r = of_property_read_bool(np, state);
		of_node_put(np);
	}
	return r;
}

static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct device_node *np, *fw_features;
	int r;

	memset(cp, 0, sizeof(*cp));
	r = pseries_get_cpu_char(cp);
	if (r != -ENOTTY)
		return r;

	np = of_find_node_by_name(NULL, "ibm,opal");
	if (np) {
		fw_features = of_get_child_by_name(np, "fw-features");
		of_node_put(np);
		if (!fw_features)
			return 0;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-spec-barrier-ori31,31,0"))
			cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-bcctrl-serialized"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-ori30,30,0"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-trig2"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-l1d-thread-split"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-disabled"))
			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-flush-bcctr2,0,0"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;

		if (have_fw_feat(fw_features, "enabled",
				 "speculation-policy-favor-security"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-l1d-flush-msr-pr-0-to-1"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-spec-barrier-for-bound-checks"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
		if (have_fw_feat(fw_features, "enabled",
				 "needs-count-cache-flush-on-context-switch"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;

		of_node_put(fw_features);
	}

	return 0;
}
#endif /* CONFIG_PPC_BOOK3S_64 */

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	case KVM_PPC_CONFIGURE_V3_MMU: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_mmuv3_cfg cfg;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->configure_mmu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(&cfg, argp, sizeof(cfg)))
			goto out;
		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
		break;
	}
	case KVM_PPC_GET_RMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_rmmu_info info;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->get_rmmu_info)
			goto out;
		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_GET_CPU_CHAR: {
		struct kvm_ppc_cpu_char cpuchar;

		r = kvmppc_get_cpu_char(&cpuchar);
		if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_SVM_OFF: {
		struct kvm *kvm = filp->private_data;

		r = 0;
		if (!kvm->arch.kvm_ops->svm_off)
			goto out;

		r = kvm->arch.kvm_ops->svm_off(kvm);
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

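/*
 * Simple bitmap allocator for LPID values shared by all
 * subarchitectures.  kvmppc_claim_lpid() marks an LPID that was
 * assigned elsewhere (for example the host's own LPID) as in use.
 */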
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);