#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <linux/of.h>		/* device-tree helpers used by kvmppc_get_cpu_char() */
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/machdep.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif

#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests preemptable */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		__kvm_guest_enter();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

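/*
 * Byte-swap every field of the shared (magic) page.  Used when the guest
 * changes the endianness it expects for interrupt delivery, so that the
 * page layout it sees stays consistent.
 */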
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

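/*
 * Handle KVM paravirtual hypercalls from the guest.  The hcall number is
 * taken from r11, the parameters from r3-r6, and the second return value
 * is passed back to the guest in r4.
 */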
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they
		 * were actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they
		 * were actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, false, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

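/*
 * Store 'size' bytes from 'ptr' into guest memory at effective address
 * *eaddr.  The address is translated first and accesses to the magic page
 * are redirected to the shared page.  Returns EMULATE_DONE on success,
 * EMULATE_DO_MMIO if the access has to be handled as MMIO, or a negative
 * error if translation or the permission check fails.
 */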
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r;

	vcpu->stat.st++;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

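/*
 * Counterpart of kvmppc_st(): read 'size' bytes of guest memory at
 * effective address *eaddr into 'ptr', again routing magic page accesses
 * straight to the shared page.
 */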
int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc;

	vcpu->stat.ld++;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

void kvm_arch_hardware_disable(void)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default mode is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

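/*
 * Report which optional KVM capabilities this host supports.  Called both
 * on the global /dev/kvm file descriptor (kvm == NULL) and per VM, where
 * the answer may depend on whether the VM runs in HV or PR mode.
 */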
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MMIO
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_PPC_ALLOC_HTAB:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
	case KVM_CAP_PPC_GET_CPU_CHAR:
		r = 1;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		if (hv_enabled)
			r = threads_per_subcore;
		else
			r = 0;
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		r = !!hv_enabled;
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
{
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new);
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

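/*
 * hrtimer callback for the emulated guest decrementer: hand the expiry to
 * kvmppc_decrementer_func() on the vcpu that owns the timer.
 */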
static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
				     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

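/*
 * Finish a previously started MMIO load: take the bytes userspace placed
 * in run->mmio.data, byte-swap and sign-extend them as recorded when the
 * load was issued, and write the result into the target register.
 */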
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

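/*
 * Common helper for emulated MMIO loads.  Record where the result has to
 * go and how to convert it, then try the in-kernel MMIO bus; if no device
 * claims the address, defer to userspace via KVM_EXIT_MMIO.
 */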
static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}

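/*
 * Emulated MMIO store: marshal 'val' into run->mmio.data in the right
 * byte order, then either complete it on the in-kernel MMIO bus or defer
 * to userspace via KVM_EXIT_MMIO.
 */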
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

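/*
 * Main KVM_RUN handler: finish whatever exit (MMIO, DCR, OSI, hypercall,
 * EPR) userspace just completed on our behalf, then hand the vcpu to the
 * core run loop.
 */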
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
	if (kvm->arch.mpic)
		return true;
#endif
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics)
		return true;
#endif
	return false;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

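/*
 * Fill in the instruction sequence a guest should use to make a KVM
 * paravirtual hypercall, as reported by the KVM_PPC_GET_PVINFO ioctl.
 */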
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}

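/*
 * VM-scoped KVM_ENABLE_CAP handler.  At present the only capability
 * handled here is KVM_CAP_PPC_ENABLE_HCALL, which toggles individual
 * hypercalls in the per-VM enabled_hcalls bitmap.
 */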
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * These functions check whether the underlying hardware is safe
 * against attacks based on observing the effects of speculatively
 * executed instructions, and whether it supplies instructions for
 * use in workarounds.  The information comes from firmware, either
 * via the device tree, or from an hcall on pseries platforms.
 */
#ifdef CONFIG_PPC_PSERIES
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct h_cpu_char_result c;
	unsigned long rc;

	if (!machine_is(pseries))
		return -ENOTTY;

	rc = plpar_get_cpu_characteristics(&c);
	if (rc == H_SUCCESS) {
		cp->character = c.character;
		cp->behaviour = c.behaviour;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
	}
	return 0;
}
#else
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	return -ENOTTY;
}
#endif

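/*
 * Helper for parsing the OPAL "fw-features" device-tree node: report
 * whether the named feature node carries the given state property
 * ("enabled" or "disabled").
 */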
static inline bool have_fw_feat(struct device_node *fw_features,
				const char *state, const char *name)
{
	struct device_node *np;
	bool r = false;

	np = of_get_child_by_name(fw_features, name);
	if (np) {
		r = of_property_read_bool(np, state);
		of_node_put(np);
	}
	return r;
}

static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct device_node *np, *fw_features;
	int r;

	memset(cp, 0, sizeof(*cp));
	r = pseries_get_cpu_char(cp);
	if (r != -ENOTTY)
		return r;

	np = of_find_node_by_name(NULL, "ibm,opal");
	if (np) {
		fw_features = of_get_child_by_name(np, "fw-features");
		of_node_put(np);
		if (!fw_features)
			return 0;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-spec-barrier-ori31,31,0"))
			cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-bcctrl-serialized"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-ori30,30,0"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-trig2"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-l1d-thread-split"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-disabled"))
			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;

		if (have_fw_feat(fw_features, "enabled",
				 "speculation-policy-favor-security"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-l1d-flush-msr-pr-0-to-1"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-spec-barrier-for-bound-checks"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;

		of_node_put(fw_features);
	}

	return 0;
}
#endif

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
		goto out;
	}
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	case KVM_PPC_GET_CPU_CHAR: {
		struct kvm_ppc_cpu_char cpuchar;

		r = kvmppc_get_cpu_char(&cpuchar);
		if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
			r = -EFAULT;
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

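/*
 * Simple bitmap allocator for logical partition IDs (LPIDs).  The number
 * of usable LPIDs is set at init time by kvmppc_init_lpid().
 */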
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);