/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif

#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (kvm_request_pending(vcpu)) {
			/* Make sure we process requests preemptable */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
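
/*
 * Illustrative sketch (not code from this file): a subarch run loop is
 * expected to consume kvmppc_prepare_to_enter() roughly like this; the
 * function name subarch_run_guest() is hypothetical.
 *
 *	int subarch_run_guest(struct kvm_run *run, struct kvm_vcpu *vcpu)
 *	{
 *		int r = kvmppc_prepare_to_enter(vcpu);
 *		if (r <= 0)
 *			return r;	// back to host, r is the return value
 *		// interrupts are hard-disabled here; enter the guest
 *		...
 *	}
 */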

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
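
/*
 * For reference, the guest-side convention served by kvmppc_kvm_pv()
 * above: the hypercall number arrives in r11 and up to four arguments
 * in r3-r6; the EV_* status is passed back to the guest in r3 by the
 * caller of this function, and the second return value (r2 here) in r4.
 * A hedged guest-side sketch, in pseudo-assembly:
 *
 *	li	r11, <hcall token>	# e.g. KVM_HCALL_TOKEN(KVM_HC_FEATURES)
 *	sc				# trap to the hypervisor
 *	# on return: r3 = EV_SUCCESS/EV_UNIMPLEMENTED, r4 = second value
 */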

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r;

	vcpu->stat.st++;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc;

	vcpu->stat.ld++;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);
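
/*
 * Usage sketch for kvmppc_ld()/kvmppc_st() (an illustration, not a
 * caller in this file): fetching an instruction through the guest MMU
 * might look like
 *
 *	u32 inst;
 *	ulong ea = kvmppc_get_pc(vcpu);
 *	if (kvmppc_ld(vcpu, &ea, sizeof(inst), &inst, false) == EMULATE_DONE)
 *		... use inst ...
 *
 * On success *eaddr has been rewritten to the translated real address.
 */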

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
	case KVM_CAP_SPAPR_TCE_VFIO:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
	case KVM_CAP_PPC_GET_CPU_CHAR:
		r = 1;
		break;

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (kvm) {
			if (kvm->arch.emul_smt_mode > 1)
				r = kvm->arch.emul_smt_mode;
			else
				r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		r = 1;
		if (hv_enabled) {
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				r = ((threads_per_subcore << 1) - 1);
			else
				/* P9 can emulate dbells, so allow any mode */
				r = 8 | 4 | 2 | 1;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		r = !!hv_enabled;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_PPC_HTM:
		r = hv_enabled &&
		    (cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM_COMP);
		break;
	default:
		r = 0;
		break;
	}
	return r;
}
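
/*
 * Userspace view, for context: the values computed above are reported
 * through the generic KVM_CHECK_EXTENSION ioctl. A minimal sketch
 * (hypothetical fd handling, errors ignored):
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int smt = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SMT);
 *	// smt > 0 means supported; the value is the (emulated) SMT mode
 */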

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		if (xive_enabled())
			kvmppc_xive_cleanup_vcpu(vcpu);
		else
			kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = get_tb();

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_dword_offset(int index)
{
	int offset;

	if ((index != 0) && (index != 1))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 1 - index;
#endif

	return offset;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
	int offset;

	if ((index > 3) || (index < 0))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 3 - index;
#endif
	return offset;
}
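
/*
 * Worked example of the mapping above: VSX word index 0 (the most
 * significant word in register order) is array offset 0 on a big-endian
 * host but offset 3 on a little-endian host, i.e.
 * kvmppc_get_vsr_word_offset(0) == 3 when __BIG_ENDIAN is not defined.
 */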

static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
					u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vval = VCPU_VSX_VR(vcpu, index);
		val.vsxval[offset] = gpr;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
	}
}

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
					     u64 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vval = VCPU_VSX_VR(vcpu, index);
		val.vsxval[0] = gpr;
		val.vsxval[1] = gpr;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
	}
}

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
				       u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	int dword_offset, word_offset;

	if (offset == -1)
		return;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vval = VCPU_VSX_VR(vcpu, index);
		val.vsx32val[offset] = gpr32;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		dword_offset = offset / 2;
		word_offset = offset % 2;
		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
		val.vsx32val[word_offset] = gpr32;
		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
	}
}
#endif

#ifdef CONFIG_ALTIVEC
static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
					u64 gpr)
{
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	u32 hi, lo;
	u32 di;

#ifdef __BIG_ENDIAN
	hi = gpr >> 32;
	lo = gpr & 0xffffffff;
#else
	lo = gpr >> 32;
	hi = gpr & 0xffffffff;
#endif

	di = 2 - vcpu->arch.mmio_vmx_copy_nums;	/* doubleword index */
	if (di > 1)
		return;

	if (vcpu->arch.mmio_host_swabbed)
		di = 1 - di;

	VCPU_VSX_VR(vcpu, index).u[di * 2] = hi;
	VCPU_VSX_VR(vcpu, index).u[di * 2 + 1] = lo;
}
#endif

#ifdef CONFIG_PPC_FPU
static inline u64 sp_to_dp(u32 fprs)
{
	u64 fprd;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
	     : "fr0");
	preempt_enable();
	return fprd;
}

static inline u32 dp_to_sp(u64 fprd)
{
	u32 fprs;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
	     : "fr0");
	preempt_enable();
	return fprs;
}

#else
#define sp_to_dp(x)	(x)
#define dp_to_sp(x)	(x)
#endif /* CONFIG_PPC_FPU */
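
/*
 * Example of the conversion done above: the lfs/stfd pair converts
 * between IEEE single and double via the FPU, so sp_to_dp(0x3f800000)
 * yields 0x3ff0000000000000 (1.0f -> 1.0); dp_to_sp() is the inverse,
 * with stfs applying the rounding back to single precision.
 */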

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	/* conversion between single and double precision */
	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
		gpr = sp_to_dp(gpr);

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
#ifdef CONFIG_VSX
	case KVM_MMIO_REG_VSX:
		if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_DWORD)
			kvmppc_set_vsr_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_WORD)
			kvmppc_set_vsr_word(vcpu, gpr);
		else if (vcpu->arch.mmio_vsx_copy_type ==
				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
			kvmppc_set_vsr_dword_dump(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case KVM_MMIO_REG_VMX:
		kvmppc_set_vmx_dword(vcpu, gpr);
		break;
#endif
	default:
		BUG();
	}
}

static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);
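
/*
 * Contract summary for the load path above: EMULATE_DO_MMIO means no
 * in-kernel device claimed the access, so the caller must exit to
 * userspace with exit_reason == KVM_EXIT_MMIO; the register update then
 * happens in kvmppc_complete_mmio_load() when KVM_RUN is re-entered.
 */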

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}

#ifdef CONFIG_VSX
int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned int rt, unsigned int bytes,
			   int is_default_endian, int mmio_sign_extend)
{
	enum emulation_result emulated = EMULATE_DONE;

	/* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
						is_default_endian, mmio_sign_extend);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}
	return emulated;
}
#endif /* CONFIG_VSX */

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
		val = dp_to_sp(val);

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8  *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8  *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);
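
/*
 * Byte-layout example for the store path above: with val = 0x11223344,
 * bytes = 4 and host_swabbed set, run->mmio.data holds
 * { 0x44, 0x33, 0x22, 0x11 }; with host_swabbed clear it holds the
 * host-native representation of the u32.
 */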

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
{
	u32 dword_offset, word_offset;
	union kvmppc_one_reg reg;
	int vsx_offset = 0;
	int copy_type = vcpu->arch.mmio_vsx_copy_type;
	int result = 0;

	switch (copy_type) {
	case KVMPPC_VSX_COPY_DWORD:
		vsx_offset =
			kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs);
			*val = reg.vsxval[vsx_offset];
		}
		break;

	case KVMPPC_VSX_COPY_WORD:
		vsx_offset =
			kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
			dword_offset = vsx_offset / 2;
			word_offset = vsx_offset % 2;
			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
			*val = reg.vsx32val[word_offset];
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs);
			*val = reg.vsx32val[vsx_offset];
		}
		break;

	default:
		result = -1;
		break;
	}

	return result;
}

int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    int rs, unsigned int bytes, int is_default_endian)
{
	u64 val;
	enum emulation_result emulated = EMULATE_DONE;

	vcpu->arch.io_gpr = rs;

	/* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
			return EMULATE_FAIL;

		emulated = kvmppc_handle_store(run, vcpu,
					       val, bytes, is_default_endian);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}

	return emulated;
}

static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
					     struct kvm_run *run)
{
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
				run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
	} else {
		emulated = kvmppc_handle_vsx_store(run, vcpu,
				vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
/* handle quadword load access in two halves */
int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
				 unsigned int rt, int is_default_endian)
{
	enum emulation_result emulated = EMULATE_DONE;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		emulated = __kvmppc_handle_load(run, vcpu, rt, 8,
						is_default_endian, 0);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
	}

	return emulated;
}

static inline int kvmppc_get_vmx_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
{
	vector128 vrs = VCPU_VSX_VR(vcpu, rs);
	u32 di;
	u64 w0, w1;

	di = 2 - vcpu->arch.mmio_vmx_copy_nums;	/* doubleword index */
	if (di > 1)
		return -1;

	if (vcpu->arch.mmio_host_swabbed)
		di = 1 - di;

	w0 = vrs.u[di * 2];
	w1 = vrs.u[di * 2 + 1];

#ifdef __BIG_ENDIAN
	*val = (w0 << 32) | w1;
#else
	*val = (w1 << 32) | w0;
#endif
	return 0;
}
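
/*
 * Example of the two-half split above: a quadword access starts with
 * mmio_vmx_copy_nums == 2, so the first call computes di == 0 and
 * returns the first doubleword of VR[rs], the second call computes
 * di == 1 and returns the other half (with di mirrored when
 * mmio_host_swabbed is set).
 */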

/* handle quadword store in two halves */
int kvmppc_handle_store128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned int rs, int is_default_endian)
{
	u64 val = 0;
	enum emulation_result emulated = EMULATE_DONE;

	vcpu->arch.io_gpr = rs;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		if (kvmppc_get_vmx_data(vcpu, rs, &val) == -1)
			return EMULATE_FAIL;

		emulated = kvmppc_handle_store(run, vcpu, val, 8,
					       is_default_endian);
		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
	}

	return emulated;
}

static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
					     struct kvm_run *run)
{
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_load128_by2x64(run, vcpu,
							vcpu->arch.io_gpr, 1);
	} else {
		emulated = kvmppc_handle_store128_by2x64(run, vcpu,
							 vcpu->arch.io_gpr, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_ALTIVEC */

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}
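
/*
 * Userspace sketch for the ONE_REG interface served above (vcpu_fd is
 * hypothetical, error handling omitted):
 *
 *	__u64 vrsave;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_VRSAVE,
 *		.addr = (__u64)&vrsave,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */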

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;

	vcpu_load(vcpu);

	if (vcpu->mmio_needed) {
		vcpu->mmio_needed = 0;
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
#ifdef CONFIG_VSX
		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			vcpu->arch.mmio_vsx_copy_nums--;
			vcpu->arch.mmio_vsx_offset++;
		}

		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
#ifdef CONFIG_ALTIVEC
		if (vcpu->arch.mmio_vmx_copy_nums > 0)
			vcpu->arch.mmio_vmx_copy_nums--;

		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	kvm_sigset_activate(vcpu);

	if (run->immediate_exit)
		r = -EINTR;
	else
		r = kvmppc_vcpu_run(run, vcpu);

	kvm_sigset_deactivate(vcpu);

#ifdef CONFIG_ALTIVEC
out:
#endif
	vcpu_put(vcpu);
	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev) {
			if (xive_enabled())
				r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
			else
				r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
		}

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(vcpu->kvm))
			break;
		r = 0;
		vcpu->kvm->arch.fwnmi_enabled = true;
		break;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
	if (kvm->arch.mpic)
		return true;
#endif
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics || kvm->arch.xive)
		return true;
#endif
	return false;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}
	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	vcpu_put(vcpu);
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}


static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
	case KVM_CAP_PPC_SMT: {
		unsigned long mode = cap->args[0];
		unsigned long flags = cap->args[1];

		r = -EINVAL;
		if (kvm->arch.kvm_ops->set_smt_mode)
			r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * These functions check whether the underlying hardware is safe
 * against attacks based on observing the effects of speculatively
 * executed instructions, and whether it supplies instructions for
 * use in workarounds.  The information comes from firmware, either
 * via the device tree on powernv platforms or from an hcall on
 * pseries platforms.
 */
#ifdef CONFIG_PPC_PSERIES
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct h_cpu_char_result c;
	unsigned long rc;

	if (!machine_is(pseries))
		return -ENOTTY;

	rc = plpar_get_cpu_characteristics(&c);
	if (rc == H_SUCCESS) {
		cp->character = c.character;
		cp->behaviour = c.behaviour;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
	}
	return 0;
}
#else
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	return -ENOTTY;
}
#endif

static inline bool have_fw_feat(struct device_node *fw_features,
				const char *state, const char *name)
{
	struct device_node *np;
	bool r = false;

	np = of_get_child_by_name(fw_features, name);
	if (np) {
		r = of_property_read_bool(np, state);
		of_node_put(np);
	}
	return r;
}

static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct device_node *np, *fw_features;
	int r;

	memset(cp, 0, sizeof(*cp));
	r = pseries_get_cpu_char(cp);
	if (r != -ENOTTY)
		return r;

	np = of_find_node_by_name(NULL, "ibm,opal");
	if (np) {
		fw_features = of_get_child_by_name(np, "fw-features");
		of_node_put(np);
		if (!fw_features)
			return 0;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-spec-barrier-ori31,31,0"))
			cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-bcctrl-serialized"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-ori30,30,0"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-trig2"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-l1d-thread-split"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-disabled"))
			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;

		if (have_fw_feat(fw_features, "enabled",
				 "speculation-policy-favor-security"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-l1d-flush-msr-pr-0-to-1"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-spec-barrier-for-bound-checks"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;

		of_node_put(fw_features);
	}

	return 0;
}
#endif /* CONFIG_PPC_BOOK3S_64 */

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	case KVM_PPC_CONFIGURE_V3_MMU: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_mmuv3_cfg cfg;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->configure_mmu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(&cfg, argp, sizeof(cfg)))
			goto out;
		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
		break;
	}
	case KVM_PPC_GET_RMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_rmmu_info info;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->get_rmmu_info)
			goto out;
		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_GET_CPU_CHAR: {
		struct kvm_ppc_cpu_char cpuchar;

		r = kvmppc_get_cpu_char(&cpuchar);
		if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
			r = -EFAULT;
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
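
/*
 * Usage sketch for the LPID allocator above (a summary of how subarch
 * code is expected to use it, not a caller in this file):
 *
 *	kvmppc_init_lpid(nr_hw_lpids);		// once, at init time
 *	long lpid = kvmppc_alloc_lpid();	// per VM; negative on exhaustion
 *	...
 *	kvmppc_free_lpid(lpid);			// on VM teardown
 */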

int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);