/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/asm-prototypes.h>
#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/smp.h>

#define KVM_CMA_CHUNK_ORDER	18

#include "book3s_xics.h"
#include "book3s_xive.h"

/*
 * The XIVE module will populate these when it loads.
 */
unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
		       unsigned long mfrr);
int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);

/*
 * Hash page table alignment on newer cpus (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);

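/*
 * Allocate nr_pages from the KVM CMA area for use as a guest hashed
 * page table.  The allocation is aligned to HPT_ALIGN_PAGES (256kB)
 * and must be at least one KVM_CMA_CHUNK_ORDER-sized chunk.
 */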
struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
			 false);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);

void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the memblock allocator
 * has been activated and all other subsystems have already
 * allocated/reserved memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We need CMA reservation only when we are in HV mode
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma",
			&kvm_cma);
	}
}

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded.  If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}

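/* Bounds of the real-mode hcall handler table in book3s_hv_rmhandlers.S */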
extern int hcall_real_table[], hcall_real_table_end[];

int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

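/* Report whether the platform has a usable hardware random-number generator */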
int kvmppc_hwrng_present(void)
{
	return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

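/*
 * H_RANDOM hcall handler: obtain a random number from the platform RNG
 * and return it to the guest in GPR4 of the vcpu.
 */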
long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
	int r;

	/* Only need to do the expensive mfmsr() on radix */
	if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
		r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]);
	else
		r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]);
	if (r)
		return H_SUCCESS;

	return H_HARDWARE;
}

/*
 * Send an interrupt or message to another CPU.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	void __iomem *xics_phys;
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* For a nested hypervisor, use the XICS via hcall */
	if (kvmhv_on_pseries()) {
		unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

		plpar_hcall_raw(H_IPI, retbuf, get_hard_smp_processor_id(cpu),
				IPI_PRIORITY);
		return;
	}

	/* On POWER9 we can use msgsnd for any destination cpu. */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* We should never reach this */
	if (WARN_ON_ONCE(xive_enabled()))
		return;

	/* Else poke the target with an IPI */
	xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys;
	if (xics_phys)
		__raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
	else
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
}

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	/* Order setting of exit map vs. msgsnd/IPI */
	smp_mb();
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}

void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
	int me, ee, i, t;
	int cpu0;

	/* Set our bit in the threads-exiting-guest map in the 0xff00
	   bits of vcore->entry_exit_map */
	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

	/* Are we the first here? */
	if ((ee >> 8) != 0)
		return;

	/*
	 * Trigger the other threads in this vcore to exit the guest.
	 * If this is a hypervisor decrementer interrupt then they
	 * will be already on their way out of the guest.
	 */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

	/*
	 * If we are doing dynamic micro-threading, interrupt the other
	 * subcores to pull them out of their guests too.
	 */
	if (!sip)
		return;

	for (i = 0; i < MAX_SUBCORES; ++i) {
		vc = sip->vc[i];
		if (!vc)
			break;
		do {
			ee = vc->entry_exit_map;
			/* Already asked to exit? */
			if ((ee >> 8) != 0)
				break;
		} while (cmpxchg(&vc->entry_exit_map, ee,
				 ee | VCORE_EXIT_REQ) != ee);
		if ((ee >> 8) == 0)
			kvmhv_interrupt_vcore(vc, ee);
	}

	/*
	 * On POWER9 when running a HPT guest on a radix host (sip != NULL),
	 * we have to interrupt inactive CPU threads to get them to
	 * restore the host LPCR value.
	 */
	if (sip->lpcr_req) {
		if (cmpxchg(&sip->do_restore, 0, 1) == 0) {
			vc = local_paca->kvm_hstate.kvm_vcore;
			cpu0 = vc->pcpu + ptid - local_paca->kvm_hstate.tid;
			for (t = 1; t < threads_per_core; ++t) {
				if (sip->napped[t])
					kvmhv_rm_send_ipi(cpu0 + t);
			}
		}
	}
}

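/* Real-mode host ICP state for XICS interrupt routing, set up by the HV KVM module */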
struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
					 u32 xisr)
{
	int i;

	/*
	 * We access the mapped array here without a lock.  That
	 * is safe because we never reduce the number of entries
	 * in the array and we never change the v_hwirq field of
	 * an entry once it is set.
	 *
	 * We have also carefully ordered the stores in the writer
	 * and the loads here in the reader, so that if we find a matching
	 * hwirq here, the associated GSI and irq_desc fields are valid.
	 */
	for (i = 0; i < pimap->n_mapped; i++) {
		if (xisr == pimap->mapped[i].r_hwirq) {
			/*
			 * Order subsequent reads in the caller to serialize
			 * with the writer.
			 */
			smp_rmb();
			return &pimap->mapped[i];
		}
	}
	return NULL;
}

/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA, it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	struct kvmppc_passthru_irqmap *pimap;
	struct kvmppc_irq_map *irq_map;
	struct kvm_vcpu *vcpu;

	vcpu = local_paca->kvm_hstate.kvm_vcpu;
	if (!vcpu)
		return 1;
	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
	if (!pimap)
		return 1;
	irq_map = get_irqmap(pimap, xisr);
	if (!irq_map)
		return 1;

	/* We're handling this interrupt, generic code doesn't need to */
	local_paca->kvm_hstate.saved_xirr = 0;

	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	return 1;
}
#endif

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 Passthrough that needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is PCI passthrough external interrupt that was handled
 */
static long kvmppc_read_one_intr(bool *again);

long kvmppc_read_intr(void)
{
	long ret = 0;
	long rc;
	bool again;

	if (xive_enabled())
		return 1;

	do {
		again = false;
		rc = kvmppc_read_one_intr(&again);
		if (rc && (ret == 0 || rc > ret))
			ret = rc;
	} while (again);
	return ret;
}

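/* Read and classify one pending interrupt; return codes as listed above */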
static long kvmppc_read_one_intr(bool *again)
{
	void __iomem *xics_phys;
	u32 h_xirr;
	__be32 xirr;
	u32 xisr;
	u8 host_ipi;
	int64_t rc;

	if (xive_enabled())
		return 1;

	/* see if a host IPI is pending */
	host_ipi = local_paca->kvm_hstate.host_ipi;
	if (host_ipi)
		return 1;

	/* Now read the interrupt from the ICP */
	if (kvmhv_on_pseries()) {
		unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

		rc = plpar_hcall_raw(H_XIRR, retbuf, 0xFF);
		xirr = cpu_to_be32(retbuf[0]);
	} else {
		xics_phys = local_paca->kvm_hstate.xics_phys;
		rc = 0;
		if (!xics_phys)
			rc = opal_int_get_xirr(&xirr, false);
		else
			xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
	}
	if (rc < 0)
		return 1;

	/*
	 * Save XIRR for later. Since we get control in reverse endian
	 * on LE systems, save it byte reversed and fetch it back in
	 * host endian. Note that xirr is the value read from the
	 * XIRR register, while h_xirr is the host endian version.
	 */
	h_xirr = be32_to_cpu(xirr);
	local_paca->kvm_hstate.saved_xirr = h_xirr;
	xisr = h_xirr & 0xffffff;
	/*
	 * Ensure that the store/load complete to guarantee all side
	 * effects of loading from XIRR has completed
	 */
	smp_mb();

	/* if nothing pending in the ICP */
	if (!xisr)
		return 0;

	/* We found something in the ICP...
	 *
	 * If it is an IPI, clear the MFRR and EOI it.
	 */
	if (xisr == XICS_IPI) {
		rc = 0;
		if (kvmhv_on_pseries()) {
			unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

			plpar_hcall_raw(H_IPI, retbuf,
					hard_smp_processor_id(), 0xff);
			plpar_hcall_raw(H_EOI, retbuf, h_xirr);
		} else if (xics_phys) {
			__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
			__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
		} else {
			opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
			rc = opal_int_eoi(h_xirr);
		}
		/* If rc > 0, there is another interrupt pending */
		*again = rc > 0;

		/*
		 * Need to ensure side effects of above stores
		 * complete before proceeding.
		 */
		smp_mb();

		/*
		 * We need to re-check host IPI now in case it got set in the
		 * meantime. If it's clear, we bounce the interrupt to the
		 * guest
		 */
		host_ipi = local_paca->kvm_hstate.host_ipi;
		if (unlikely(host_ipi != 0)) {
			/* We raced with the host,
			 * we need to resend that IPI, bummer
			 */
			if (kvmhv_on_pseries()) {
				unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

				plpar_hcall_raw(H_IPI, retbuf,
						hard_smp_processor_id(),
						IPI_PRIORITY);
			} else if (xics_phys)
				__raw_rm_writeb(IPI_PRIORITY,
						xics_phys + XICS_MFRR);
			else
				opal_int_set_mfrr(hard_smp_processor_id(),
						  IPI_PRIORITY);
			/* Let side effects complete */
			smp_mb();
			return 1;
		}

		/* OK, it's an IPI for us */
		local_paca->kvm_hstate.saved_xirr = 0;
		return -1;
	}

	return kvmppc_check_passthru(xisr, xirr, again);
}

#ifdef CONFIG_KVM_XICS
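/* Are we currently executing in real mode (MSR[DR] clear)? */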
static inline bool is_rm(void)
{
	return !(mfmsr() & MSR_DR);
}

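/*
 * The hcall handlers below share a common dispatch pattern: if XIVE is
 * in use, call the real-mode XIVE handler while still in real mode, or
 * the virtual-mode handler (populated by the XIVE module through the
 * __xive_vm_* hooks above) otherwise; with no XIVE, fall back to the
 * real-mode XICS emulation.
 */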
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_xirr(vcpu);
		if (unlikely(!__xive_vm_h_xirr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_xirr(vcpu);
	} else
		return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	vcpu->arch.regs.gpr[5] = get_tb();
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_xirr(vcpu);
		if (unlikely(!__xive_vm_h_xirr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_xirr(vcpu);
	} else
		return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_ipoll(vcpu, server);
		if (unlikely(!__xive_vm_h_ipoll))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_ipoll(vcpu, server);
	} else
		return H_TOO_HARD;
}

int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_ipi(vcpu, server, mfrr);
		if (unlikely(!__xive_vm_h_ipi))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_ipi(vcpu, server, mfrr);
	} else
		return xics_rm_h_ipi(vcpu, server, mfrr);
}

int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_cppr(vcpu, cppr);
		if (unlikely(!__xive_vm_h_cppr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_cppr(vcpu, cppr);
	} else
		return xics_rm_h_cppr(vcpu, cppr);
}

int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_eoi(vcpu, xirr);
		if (unlikely(!__xive_vm_h_eoi))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_eoi(vcpu, xirr);
	} else
		return xics_rm_h_eoi(vcpu, xirr);
}
#endif /* CONFIG_KVM_XICS */

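/*
 * Handle an interrupt taken while in the KVM hypervisor entry/exit
 * code.  System reset and machine check are forwarded to their
 * regular handlers; anything else here is fatal.
 */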
void kvmppc_bad_interrupt(struct pt_regs *regs)
{
	/*
	 * 100 could happen at any time, 200 can happen due to invalid real
	 * address access for example (or any time due to a hardware problem).
	 */
	if (TRAP(regs) == 0x100) {
		get_paca()->in_nmi++;
		system_reset_exception(regs);
		get_paca()->in_nmi--;
	} else if (TRAP(regs) == 0x200) {
		machine_check_exception(regs);
	} else {
		die("Bad interrupt in KVM entry/exit code", regs, SIGABRT);
	}
	panic("Bad KVM trap");
}

/*
 * Functions used to synchronize between the threads of a virtual
 * core when running a HPT guest on a radix host (POWER9), where
 * LPCR and LPIDR have to be switched in lock-step on all threads.
 */
#define PHASE_REALMODE		1	/* in real mode with split-mode setup */
#define PHASE_SET_LPCR		2	/* have set LPCR and LPIDR */
#define PHASE_OUT_OF_GUEST	4	/* exited the guest */
#define PHASE_RESET_LPCR	8	/* have reset LPCR and LPIDR */

#define ALL(p)		(((p) << 24) | ((p) << 16) | ((p) << 8) | (p))

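/* Record that this thread has reached @phase and spin until all threads have */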
static void wait_for_sync(struct kvm_split_mode *sip, int phase)
{
	int thr = local_paca->kvm_hstate.tid;

	sip->lpcr_sync.phase[thr] |= phase;
	phase = ALL(phase);
	while ((sip->lpcr_sync.allphases & phase) != phase) {
		HMT_low();
		barrier();
	}
	HMT_medium();
}

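/*
 * Switch this core's LPCR and LPIDR to the guest-requested values and
 * have thread 0 flush the TLB; called on every thread in real mode.
 */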
void kvmhv_p9_set_lpcr(struct kvm_split_mode *sip)
{
	unsigned long rb, set;

	/* wait for every other thread to get to real mode */
	wait_for_sync(sip, PHASE_REALMODE);

	/* Set LPCR and LPIDR */
	mtspr(SPRN_LPCR, sip->lpcr_req);
	mtspr(SPRN_LPID, sip->lpidr_req);
	isync();

	/* Invalidate the TLB on thread 0 */
	if (local_paca->kvm_hstate.tid == 0) {
		sip->do_set = 0;
		asm volatile("ptesync" : : : "memory");
		for (set = 0; set < POWER9_TLB_SETS_RADIX; ++set) {
			rb = TLBIEL_INVAL_SET_LPID +
				(set << TLBIEL_INVAL_SET_SHIFT);
			asm volatile(PPC_TLBIEL(%0, %1, 0, 0, 0) : :
				     "r" (rb), "r" (0));
		}
		asm volatile("ptesync" : : : "memory");
	}

	/* indicate that we have done so and wait for others */
	wait_for_sync(sip, PHASE_SET_LPCR);
	/* order read of sip->lpcr_sync.allphases vs. sip->do_set */
	smp_rmb();
}

/*
 * Called when a thread that has been in the guest needs
 * to reload the host LPCR value - but only on POWER9 when
 * running a HPT guest on a radix host.
 */
void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip)
{
	/* we're out of the guest... */
	wait_for_sync(sip, PHASE_OUT_OF_GUEST);

	mtspr(SPRN_LPID, 0);
	mtspr(SPRN_LPCR, sip->host_lpcr);
	isync();

	if (local_paca->kvm_hstate.tid == 0) {
		sip->do_restore = 0;
		smp_wmb();	/* order store of do_restore vs. phase */
	}

	wait_for_sync(sip, PHASE_RESET_LPCR);
	smp_mb();
	local_paca->kvm_hstate.kvm_split_mode = NULL;
}

/*
 * Is there a PRIV_DOORBELL pending for the guest (on POWER9)?
 * Can we inject a Decrementer or a External interrupt?
 */
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
{
	int ext;
	unsigned long vec = 0;
	unsigned long lpcr;

	/* Insert EXTERNAL bit into LPCR at the MER bit position */
	ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1;
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= ext << LPCR_MER_SH;
	mtspr(SPRN_LPCR, lpcr);
	isync();

	if (vcpu->arch.shregs.msr & MSR_EE) {
		if (ext) {
			vec = BOOK3S_INTERRUPT_EXTERNAL;
		} else {
			long int dec = mfspr(SPRN_DEC);
			if (!(lpcr & LPCR_LD))
				dec = (int) dec;
			if (dec < 0)
				vec = BOOK3S_INTERRUPT_DECREMENTER;
		}
	}
	if (vec) {
		unsigned long msr, old_msr = vcpu->arch.shregs.msr;

		kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
		kvmppc_set_srr1(vcpu, old_msr);
		kvmppc_set_pc(vcpu, vec);
		msr = vcpu->arch.intr_msr;
		if (MSR_TM_ACTIVE(old_msr))
			msr |= MSR_TS_S;
		vcpu->arch.shregs.msr = msr;
	}

	if (vcpu->arch.doorbell_request) {
		mtspr(SPRN_DPDES, 1);
		vcpu->arch.vcore->dpdes = 1;
		smp_wmb();
		vcpu->arch.doorbell_request = 0;
	}
}