#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>

#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

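/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */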
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

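		/*
		 * The write of vcpu->mode above must be ordered before the
		 * check of pending requests below, so that a request made
		 * by another CPU after this point is either seen here or
		 * results in us being kicked out of guest mode.
		 */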
		smp_mb();

		if (kvm_request_pending(vcpu)) {
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
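		/*
		 * The guest is running in 32-bit mode (MSR_SF is clear),
		 * so only the low 32 bits of each hypercall argument are
		 * significant.
		 */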
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
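			/*
			 * The guest did not set MAGIC_PAGE_FLAG_NOT_MAPPED_NX,
			 * i.e. it can't cope with an NX-protected magic page,
			 * so stop enforcing kernel-mode NX for this vcpu and
			 * flush the TLB to drop any existing mapping.
			 */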
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
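		/*
		 * With 64K host pages the shared struct must sit in the
		 * same 4K slot within its page as the guest's magic page;
		 * if it doesn't, copy it to a slot that matches.
		 */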
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	if (!vcpu->arch.pvr)
		goto out;

	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r;

	vcpu->stat.st++;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc;

	vcpu->stat.ld++;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;

	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
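	/*
	 * For an HV guest with in-kernel XICS/XIVE, wait for all CPUs to
	 * finish any pending real-mode interrupt work (IPIs) before the
	 * vcpus and their interrupt state are freed below.
	 */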
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	module_put(kvm->arch.kvm_ops->owner);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
	case KVM_CAP_SPAPR_TCE_VFIO:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
		r = 1;
		break;

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (kvm) {
			if (kvm->arch.emul_smt_mode > 1)
				r = kvm->arch.emul_smt_mode;
			else
				r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		r = 1;
		if (hv_enabled) {
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				r = ((threads_per_subcore << 1) - 1);
			else
				r = 8 | 4 | 2 | 1;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && !radix_enabled() &&
		       cpu_has_feature(CPU_FTR_ARCH_300));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
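		/*
		 * Recommended number of vcpus: with HV KVM every present
		 * CPU can run a vcpu (not just the ones currently online),
		 * otherwise limit the recommendation to online CPUs.
		 */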
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		r = !!hv_enabled && !cpu_has_feature(CPU_FTR_ARCH_300);
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_PPC_HTM:
		r = cpu_has_feature(CPU_FTR_TM_COMP) &&
		    is_kvmppc_hv_enabled(kvm);
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;

	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		if (xive_enabled())
			kvmppc_xive_cleanup_vcpu(vcpu);
		else
			kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
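	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may be used
	 * by the guest, so context-switch it by hand here.  On non-booke
	 * it is tied to Altivec and handled in the book3s code.
	 */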
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

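/*
 * irq bypass support: only available when the active backend (HV or PR)
 * provides irq_bypass_add_producer/irq_bypass_del_producer callbacks;
 * otherwise the hooks below are no-ops.
 */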
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_dword_offset(int index)
{
	int offset;

	if ((index != 0) && (index != 1))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 1 - index;
#endif

	return offset;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
	int offset;

	if ((index > 3) || (index < 0))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 3 - index;
#endif
	return offset;
}

static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vval = VCPU_VSX_VR(vcpu, index);
		val.vsxval[offset] = gpr;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
	}
}

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vval = VCPU_VSX_VR(vcpu, index);
		val.vsxval[0] = gpr;
		val.vsxval[1] = gpr;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
	}
}

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	int dword_offset, word_offset;

	if (offset == -1)
		return;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vval = VCPU_VSX_VR(vcpu, index);
		val.vsx32val[offset] = gpr32;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		dword_offset = offset / 2;
		word_offset = offset % 2;
		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
		val.vsx32val[word_offset] = gpr32;
		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
	}
}
#endif

#ifdef CONFIG_PPC_FPU
static inline u64 sp_to_dp(u32 fprs)
{
	u64 fprd;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
	     : "fr0");
	preempt_enable();
	return fprd;
}

static inline u32 dp_to_sp(u64 fprd)
{
	u32 fprs;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
	     : "fr0");
	preempt_enable();
	return fprs;
}

#else
#define sp_to_dp(x)	(x)
#define dp_to_sp(x)	(x)
#endif

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
		gpr = sp_to_dp(gpr);

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
#ifdef CONFIG_VSX
	case KVM_MMIO_REG_VSX:
		if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_DWORD)
			kvmppc_set_vsr_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_WORD)
			kvmppc_set_vsr_word(vcpu, gpr);
		else if (vcpu->arch.mmio_vsx_copy_type ==
				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
			kvmppc_set_vsr_dword_dump(vcpu, gpr);
		break;
#endif
	default:
		BUG();
	}
}

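/*
 * Common MMIO load path: describe the access in run->mmio, then either
 * satisfy it from an in-kernel device via kvm_io_bus_read() or hand it
 * to userspace (EMULATE_DO_MMIO).  host_swabbed records whether the data
 * needs byte swapping relative to the host byte order.
 */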
static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	int idx, ret;
	bool host_swabbed;

	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}

#ifdef CONFIG_VSX
int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned int rt, unsigned int bytes,
			   int is_default_endian, int mmio_sign_extend)
{
	enum emulation_result emulated = EMULATE_DONE;

	if ((vcpu->arch.mmio_vsx_copy_nums > 4) ||
	    (vcpu->arch.mmio_vsx_copy_nums < 0)) {
		return EMULATE_FAIL;
	}

	while (vcpu->arch.mmio_vsx_copy_nums) {
		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
						is_default_endian, mmio_sign_extend);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}
	return emulated;
}
#endif

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
		val = dp_to_sp(val);

	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
{
	u32 dword_offset, word_offset;
	union kvmppc_one_reg reg;
	int vsx_offset = 0;
	int copy_type = vcpu->arch.mmio_vsx_copy_type;
	int result = 0;

	switch (copy_type) {
	case KVMPPC_VSX_COPY_DWORD:
		vsx_offset =
			kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs);
			*val = reg.vsxval[vsx_offset];
		}
		break;

	case KVMPPC_VSX_COPY_WORD:
		vsx_offset =
			kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
			dword_offset = vsx_offset / 2;
			word_offset = vsx_offset % 2;
			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
			*val = reg.vsx32val[word_offset];
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs);
			*val = reg.vsx32val[vsx_offset];
		}
		break;

	default:
		result = -1;
		break;
	}

	return result;
}

int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    int rs, unsigned int bytes, int is_default_endian)
{
	u64 val;
	enum emulation_result emulated = EMULATE_DONE;

	vcpu->arch.io_gpr = rs;

	if ((vcpu->arch.mmio_vsx_copy_nums > 4) ||
	    (vcpu->arch.mmio_vsx_copy_nums < 0)) {
		return EMULATE_FAIL;
	}

	while (vcpu->arch.mmio_vsx_copy_nums) {
		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
			return EMULATE_FAIL;

		emulated = kvmppc_handle_store(run, vcpu,
					       val, bytes, is_default_endian);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}

	return emulated;
}

static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
					     struct kvm_run *run)
{
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
						  run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
	} else {
		emulated = kvmppc_handle_vsx_store(run, vcpu,
						   vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->mmio_needed) {
		vcpu->mmio_needed = 0;
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
#ifdef CONFIG_VSX
		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			vcpu->arch.mmio_vsx_copy_nums--;
			vcpu->arch.mmio_vsx_offset++;
		}

		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				return r;
			}
		}
#endif
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (run->immediate_exit)
		r = -EINTR;
	else
		r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

1460
1461int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1462{
1463 if (irq->irq == KVM_INTERRUPT_UNSET) {
1464 kvmppc_core_dequeue_external(vcpu);
1465 return 0;
1466 }
1467
1468 kvmppc_core_queue_external(vcpu, irq);
1469
1470 kvm_vcpu_kick(vcpu);
1471
1472 return 0;
1473}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev) {
			if (xive_enabled())
				r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
			else
				r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
		}

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(vcpu->kvm))
			break;
		r = 0;
		vcpu->kvm->arch.fwnmi_enabled = true;
		break;
#endif
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
	if (kvm->arch.mpic)
		return true;
#endif
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics || kvm->arch.xive)
		return true;
#endif
	return false;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

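	/*
	 * The hypercall sequence handed to the guest is:
	 *
	 *	lis	r0, KVM_SC_MAGIC_R0@h
	 *	ori	r0, r0, KVM_SC_MAGIC_R0@l
	 *	sc
	 *	nop
	 */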
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
	case KVM_CAP_PPC_SMT: {
		unsigned long mode = cap->args[0];
		unsigned long flags = cap->args[1];

		r = -EINVAL;
		if (kvm->arch.kvm_ops->set_smt_mode)
			r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				     IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	case KVM_PPC_CONFIGURE_V3_MMU: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_mmuv3_cfg cfg;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->configure_mmu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(&cfg, argp, sizeof(cfg)))
			goto out;
		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
		break;
	}
	case KVM_PPC_GET_RMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_rmmu_info info;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->get_rmmu_info)
			goto out;
		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);