// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif

#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);


int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_runnable(vcpu);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (kvm_request_pending(vcpu)) {
			/* Make sure we process requests preemptable */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
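
/*
 * Typical caller pattern (a sketch, not taken from this file): each
 * subarch's vcpu_run loop does
 *
 *	r = kvmppc_prepare_to_enter(vcpu);
 *	if (r <= 0)
 *		return r;	// back out to the host/userspace
 *	// hard IRQs are disabled here; safe to enter the guest
 *
 * matching the return convention documented above.
 */
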
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
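
/*
 * Guest-side view of the hypercall ABI handled above (illustrative):
 * the guest loads the hypercall token into r11 and up to four
 * arguments into r3..r6, then traps to the host using the instruction
 * sequence advertised by kvm_vm_ioctl_get_pvinfo() below.  The status
 * (EV_SUCCESS etc.) is handed back in r3, and the second return value,
 * set via kvmppc_set_gpr(vcpu, 4, r2) above, in r4.
 */
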
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
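
/*
 * Note on the RESUME_* codes above: the subarch exit handlers re-enter
 * the guest on RESUME_GUEST(_NV) and return to userspace on
 * RESUME_HOST(_NV) (here with run->exit_reason set to KVM_EXIT_MMIO so
 * that userspace can emulate the access); the _NV variants additionally
 * request that non-volatile guest registers be reloaded.
 */
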
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r = -EINVAL;

	vcpu->stat.st++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
		r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
							    size);

	if ((!r) || (r == -EAGAIN))
		return r;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc = -EINVAL;

	vcpu->stat.ld++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
		rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
							      size);

	if ((!rc) || (rc == -EAGAIN))
		return rc;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);
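
/*
 * Example use of kvmppc_ld() (a sketch): emulating a 4-byte guest load
 * from effective address 'ea' would look like
 *
 *	u32 v;
 *	int rc = kvmppc_ld(vcpu, &ea, sizeof(v), &v, true);
 *
 * EMULATE_DONE means 'v' now holds the data (and 'ea' was rewritten to
 * the translated real address), EMULATE_DO_MMIO means the address is
 * not backed by guest memory and needs MMIO emulation, and a negative
 * value reports a translation or permission error.
 */
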
411
412int kvm_arch_hardware_enable(void)
413{
414 return 0;
415}
416
417int kvm_arch_hardware_setup(void)
418{
419 return 0;
420}
421
422int kvm_arch_check_processor_compat(void)
423{
424 return kvmppc_core_check_processor_compat();
425}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
		r = 1;
		break;
	case KVM_CAP_SPAPR_TCE_VFIO:
		r = !!cpu_has_feature(CPU_FTR_HVMODE);
		break;
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
	case KVM_CAP_PPC_GET_CPU_CHAR:
		r = 1;
		break;
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE:
		/*
		 * We need XIVE to be enabled on the platform (implies
		 * a POWER9 processor) and the PowerNV platform, as
		 * nested is not yet supported.
		 */
		r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE);
		break;
#endif

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (kvm) {
			if (kvm->arch.emul_smt_mode > 1)
				r = kvm->arch.emul_smt_mode;
			else
				r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		r = 1;
		if (hv_enabled) {
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				r = ((threads_per_subcore << 1) - 1);
			else
				/* P9 can emulate dbells, so allow any mode */
				r = 8 | 4 | 2 | 1;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) &&
		       cpu_has_feature(CPU_FTR_HVMODE));
		break;
	case KVM_CAP_PPC_NESTED_HV:
		r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
		       !kvmppc_hv_ops->enable_nested(NULL));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_ID;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		r = !!hv_enabled;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = hv_enabled;
		break;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_CAP_PPC_HTM:
		r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
		     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;
}
676
677long kvm_arch_dev_ioctl(struct file *filp,
678 unsigned int ioctl, unsigned long arg)
679{
680 return -EINVAL;
681}
682
683void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
684 struct kvm_memory_slot *dont)
685{
686 kvmppc_core_free_memslot(kvm, free, dont);
687}
688
689int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
690 unsigned long npages)
691{
692 return kvmppc_core_create_memslot(kvm, slot, npages);
693}
694
695int kvm_arch_prepare_memory_region(struct kvm *kvm,
696 struct kvm_memory_slot *memslot,
697 const struct kvm_userspace_memory_region *mem,
698 enum kvm_mr_change change)
699{
700 return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
701}
702
703void kvm_arch_commit_memory_region(struct kvm *kvm,
704 const struct kvm_userspace_memory_region *mem,
705 const struct kvm_memory_slot *old,
706 const struct kvm_memory_slot *new,
707 enum kvm_mr_change change)
708{
709 kvmppc_core_commit_memory_region(kvm, mem, old, new, change);
710}
711
712void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
713 struct kvm_memory_slot *slot)
714{
715 kvmppc_core_flush_memslot(kvm, slot);
716}
717
718struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
719{
720 struct kvm_vcpu *vcpu;
721 vcpu = kvmppc_core_vcpu_create(kvm, id);
722 if (!IS_ERR(vcpu)) {
723 vcpu->arch.wqp = &vcpu->wq;
724 kvmppc_create_vcpu_debugfs(vcpu, id);
725 }
726 return vcpu;
727}
728
729void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
730{
731}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		if (xics_on_xive())
			kvmppc_xive_cleanup_vcpu(vcpu);
		else
			kvmppc_xics_free_icp(vcpu);
		break;
	case KVMPPC_IRQ_XIVE:
		kvmppc_xive_native_cleanup_vcpu(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = get_tb();

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_dword_offset(int index)
{
	int offset;

	if ((index != 0) && (index != 1))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 1 - index;
#endif

	return offset;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
	int offset;

	if ((index > 3) || (index < 0))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 3 - index;
#endif
	return offset;
}
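
/*
 * Worked example for the offset helpers above: a VSX register holds
 * two doublewords (or four words).  On a big-endian host, guest
 * element 0 is at array index 0, so kvmppc_get_vsr_dword_offset(0)
 * returns 0; on a little-endian host the array order is reversed and
 * it returns 1 (likewise kvmppc_get_vsr_word_offset(0) returns 3).
 * Out-of-range indices return -1, which the setters below treat as
 * "do nothing".
 */
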
static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[offset] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
	}
}

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[0] = gpr;
		val.vsxval[1] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
	}
}

static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
	u32 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (index >= 32) {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		val.vsx32val[2] = gpr;
		val.vsx32val[3] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
		VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
	}
}

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	int dword_offset, word_offset;

	if (offset == -1)
		return;

	if (index >= 32) {
		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsx32val[offset] = gpr32;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
	} else {
		dword_offset = offset / 2;
		word_offset = offset % 2;
		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
		val.vsx32val[word_offset] = gpr32;
		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
	}
}
#endif

#ifdef CONFIG_ALTIVEC
static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
		int index, int element_size)
{
	int offset;
	int elts = sizeof(vector128)/element_size;

	if ((index < 0) || (index >= elts))
		return -1;

	if (kvmppc_need_byteswap(vcpu))
		offset = elts - index - 1;
	else
		offset = index;

	return offset;
}

static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
}

static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
}

static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
}

static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
}


static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_dword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsxval[offset] = gpr;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_word_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx32val[offset] = gpr32;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
	u16 gpr16)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_hword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx16val[offset] = gpr16;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
	u8 gpr8)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_byte_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx8val[offset] = gpr8;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}
#endif

#ifdef CONFIG_PPC_FPU
static inline u64 sp_to_dp(u32 fprs)
{
	u64 fprd;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
	     : "fr0");
	preempt_enable();
	return fprd;
}

static inline u32 dp_to_sp(u64 fprd)
{
	u32 fprs;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
	     : "fr0");
	preempt_enable();
	return fprs;
}
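
/*
 * Note on the conversion helpers above: PPC FPRs always hold
 * double-precision values, so lfs loads a single-precision value and
 * converts it to double format in fr0, which stfd then stores back
 * out; dp_to_sp() does the reverse.  For example, sp_to_dp(0x3f800000)
 * (1.0f) yields 0x3ff0000000000000 (1.0).
 */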

#else
#define sp_to_dp(x)	(x)
#define dp_to_sp(x)	(x)
#endif /* CONFIG_PPC_FPU */

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	/* conversion between single and double precision */
	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
		gpr = sp_to_dp(gpr);

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);

		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
#ifdef CONFIG_VSX
	case KVM_MMIO_REG_VSX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
			kvmppc_set_vsr_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
			kvmppc_set_vsr_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
			kvmppc_set_vsr_dword_dump(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
			kvmppc_set_vsr_word_dump(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case KVM_MMIO_REG_VMX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
			kvmppc_set_vmx_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
			kvmppc_set_vmx_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_HWORD)
			kvmppc_set_vmx_hword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_BYTE)
			kvmppc_set_vmx_byte(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_MMIO_REG_NESTED_GPR:
		if (kvmppc_need_byteswap(vcpu))
			gpr = swab64(gpr);
		kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
				     sizeof(gpr));
		break;
#endif
	default:
		BUG();
	}
}

static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
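
/*
 * Note that __kvmppc_handle_load() first tries the in-kernel MMIO bus
 * (devices registered on KVM_MMIO_BUS); only when no in-kernel handler
 * claims the address does it return EMULATE_DO_MMIO, leaving run->mmio
 * filled in so userspace can complete the access on the next KVM_RUN.
 */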

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}

#ifdef CONFIG_VSX
int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned int rt, unsigned int bytes,
			   int is_default_endian, int mmio_sign_extend)
{
	enum emulation_result emulated = EMULATE_DONE;

	/* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
			is_default_endian, mmio_sign_extend);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}
	return emulated;
}
#endif /* CONFIG_VSX */

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
		val = dp_to_sp(val);

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
{
	u32 dword_offset, word_offset;
	union kvmppc_one_reg reg;
	int vsx_offset = 0;
	int copy_type = vcpu->arch.mmio_copy_type;
	int result = 0;

	switch (copy_type) {
	case KVMPPC_VSX_COPY_DWORD:
		vsx_offset =
			kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (rs < 32) {
			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
			*val = reg.vsxval[vsx_offset];
		}
		break;

	case KVMPPC_VSX_COPY_WORD:
		vsx_offset =
			kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (rs < 32) {
			dword_offset = vsx_offset / 2;
			word_offset = vsx_offset % 2;
			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
			*val = reg.vsx32val[word_offset];
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
			*val = reg.vsx32val[vsx_offset];
		}
		break;

	default:
		result = -1;
		break;
	}

	return result;
}

int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    int rs, unsigned int bytes, int is_default_endian)
{
	u64 val;
	enum emulation_result emulated = EMULATE_DONE;

	vcpu->arch.io_gpr = rs;

	/* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
			return EMULATE_FAIL;

		emulated = kvmppc_handle_store(run, vcpu,
					       val, bytes, is_default_endian);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}

	return emulated;
}

static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
					     struct kvm_run *run)
{
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
			 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
	} else {
		emulated = kvmppc_handle_vsx_store(run, vcpu,
			 vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif

#ifdef CONFIG_ALTIVEC
int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes, int is_default_endian)
{
	enum emulation_result emulated = EMULATE_DONE;

	/* A 128-bit VMX access is at most two 64-bit beats */
	if (vcpu->arch.mmio_vmx_copy_nums > 2)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
				is_default_endian, 0);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
		vcpu->arch.mmio_vmx_offset++;
	}

	return emulated;
}

int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsxval[vmx_offset];

	return result;
}

int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx32val[vmx_offset];

	return result;
}

int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx16val[vmx_offset];

	return result;
}

int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
{
	union kvmppc_one_reg reg;
	int vmx_offset = 0;
	int result = 0;

	vmx_offset =
		kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)
		return -1;

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx8val[vmx_offset];

	return result;
}

int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian)
{
	u64 val = 0;
	unsigned int index = rs & KVM_MMIO_REG_MASK;
	enum emulation_result emulated = EMULATE_DONE;

	/* A 128-bit VMX access is at most two 64-bit beats */
	if (vcpu->arch.mmio_vmx_copy_nums > 2)
		return EMULATE_FAIL;

	vcpu->arch.io_gpr = rs;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		switch (vcpu->arch.mmio_copy_type) {
		case KVMPPC_VMX_COPY_DWORD:
			if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;

			break;
		case KVMPPC_VMX_COPY_WORD:
			if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_HWORD:
			if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		case KVMPPC_VMX_COPY_BYTE:
			if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			break;
		default:
			return EMULATE_FAIL;
		}

		emulated = kvmppc_handle_store(run, vcpu, val, bytes,
				is_default_endian);
		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
		vcpu->arch.mmio_vmx_offset++;
	}

	return emulated;
}

static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
		struct kvm_run *run)
{
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vmx_load(run, vcpu,
				vcpu->arch.io_gpr, run->mmio.len, 1);
	} else {
		emulated = kvmppc_handle_vmx_store(run, vcpu,
				vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;

	vcpu_load(vcpu);

	if (vcpu->mmio_needed) {
		vcpu->mmio_needed = 0;
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
#ifdef CONFIG_VSX
		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			vcpu->arch.mmio_vsx_copy_nums--;
			vcpu->arch.mmio_vsx_offset++;
		}

		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
#ifdef CONFIG_ALTIVEC
		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			vcpu->arch.mmio_vmx_copy_nums--;
			vcpu->arch.mmio_vmx_offset++;
		}

		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	kvm_sigset_activate(vcpu);

	if (run->immediate_exit)
		r = -EINTR;
	else
		r = kvmppc_vcpu_run(run, vcpu);

	kvm_sigset_deactivate(vcpu);

#ifdef CONFIG_ALTIVEC
out:
#endif
	vcpu_put(vcpu);
	return r;
}
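
/*
 * MMIO round trip, for reference: a guest access that cannot be
 * handled in kernel exits to userspace with KVM_EXIT_MMIO and
 * run->mmio describing the access; userspace performs it (filling
 * run->mmio.data for reads) and calls KVM_RUN again, at which point
 * the code above completes the load and, for multi-beat VSX/VMX
 * accesses, re-issues the remaining beats before re-entering the
 * guest.
 */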

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev) {
			if (xics_on_xive())
				r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
			else
				r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
		}

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -ENXIO;
		if (!xive_enabled())
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
							    cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(vcpu->kvm))
			break;
		r = 0;
		vcpu->kvm->arch.fwnmi_enabled = true;
		break;
#endif
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
	if (kvm->arch.mpic)
		return true;
#endif
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics || kvm->arch.xive)
		return true;
#endif
	return false;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}
	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		vcpu_load(vcpu);
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		vcpu_put(vcpu);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		vcpu_load(vcpu);
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		vcpu_put(vcpu);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
	case KVM_CAP_PPC_SMT: {
		unsigned long mode = cap->args[0];
		unsigned long flags = cap->args[1];

		r = -EINVAL;
		if (kvm->arch.kvm_ops->set_smt_mode)
			r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
		break;
	}

	case KVM_CAP_PPC_NESTED_HV:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(kvm) ||
		    !kvm->arch.kvm_ops->enable_nested)
			break;
		r = kvm->arch.kvm_ops->enable_nested(kvm);
		break;
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
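
/*
 * Illustrative userspace usage (a sketch; everything except the UAPI
 * constants is an assumption): enabling in-kernel handling of one
 * sPAPR hypercall on an open VM file descriptor 'vm_fd' could look
 * like
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_PPC_ENABLE_HCALL,
 *		.args = { H_LOGICAL_CI_LOAD, 1 },
 *	};
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 *
 * where args[0] is the hypercall token and args[1] enables (1) or
 * disables (0) it, matching the checks above.
 */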

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * These functions check whether the underlying hardware is safe
 * against attacks based on observing the effects of speculatively
 * executed instructions, and whether it supplies instructions for
 * use in workarounds.  The information comes from firmware, either
 * via the device tree on powernv platforms or from an hcall on
 * pseries platforms.
 */
#ifdef CONFIG_PPC_PSERIES
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct h_cpu_char_result c;
	unsigned long rc;

	if (!machine_is(pseries))
		return -ENOTTY;

	rc = plpar_get_cpu_characteristics(&c);
	if (rc == H_SUCCESS) {
		cp->character = c.character;
		cp->behaviour = c.behaviour;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
	}
	return 0;
}
#else
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	return -ENOTTY;
}
#endif

static inline bool have_fw_feat(struct device_node *fw_features,
				const char *state, const char *name)
{
	struct device_node *np;
	bool r = false;

	np = of_get_child_by_name(fw_features, name);
	if (np) {
		r = of_property_read_bool(np, state);
		of_node_put(np);
	}
	return r;
}

static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct device_node *np, *fw_features;
	int r;

	memset(cp, 0, sizeof(*cp));
	r = pseries_get_cpu_char(cp);
	if (r != -ENOTTY)
		return r;

	np = of_find_node_by_name(NULL, "ibm,opal");
	if (np) {
		fw_features = of_get_child_by_name(np, "fw-features");
		of_node_put(np);
		if (!fw_features)
			return 0;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-spec-barrier-ori31,31,0"))
			cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-bcctrl-serialized"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-ori30,30,0"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-trig2"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-l1d-thread-split"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-disabled"))
			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-flush-bcctr2,0,0"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;

		if (have_fw_feat(fw_features, "enabled",
				 "speculation-policy-favor-security"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-l1d-flush-msr-pr-0-to-1"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-spec-barrier-for-bound-checks"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
		if (have_fw_feat(fw_features, "enabled",
				 "needs-count-cache-flush-on-context-switch"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;

		of_node_put(fw_features);
	}

	return 0;
}
#endif /* CONFIG_PPC_BOOK3S_64 */

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	case KVM_PPC_CONFIGURE_V3_MMU: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_mmuv3_cfg cfg;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->configure_mmu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(&cfg, argp, sizeof(cfg)))
			goto out;
		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
		break;
	}
	case KVM_PPC_GET_RMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_rmmu_info info;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->get_rmmu_info)
			goto out;
		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_GET_CPU_CHAR: {
		struct kvm_ppc_cpu_char cpuchar;

		r = kvmppc_get_cpu_char(&cpuchar);
		if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
			r = -EFAULT;
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
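
/*
 * Usage sketch (how the subarch modules are expected to drive the LPID
 * allocator; details vary by platform and are assumptions here): call
 * kvmppc_init_lpid() once with the number of hardware LPIDs, reserve
 * any LPIDs owned by the host or firmware with kvmppc_claim_lpid(),
 * then pair kvmppc_alloc_lpid()/kvmppc_free_lpid() around each guest's
 * lifetime.
 */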

int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);