/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/asm-prototypes.h>
#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/smp.h>

#define KVM_CMA_CHUNK_ORDER	18

#include "book3s_xics.h"
#include "book3s_xive.h"

/*
 * The XIVE module will populate these when it loads.
 */
unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
		       unsigned long mfrr);
int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);

/*
 * Hash page table alignment on newer cpus (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
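
/*
 * For example, booting with "kvm_cma_resv_ratio=10" on the kernel
 * command line reserves 10% of memory for the HPT CMA area instead
 * of the default 5%.
 */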

struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
			 false);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);

void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);
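
/*
 * Illustrative sketch (not part of this file): a caller sizing a hash
 * page table of 2^order bytes would pair the two helpers above like
 * this.  "order" is a hypothetical parameter, and the helper names
 * below are invented for the example.
 */
#if 0
static struct page *example_hpt_alloc(unsigned int order)
{
	/* a 2^order-byte HPT needs 2^(order - PAGE_SHIFT) pages */
	return kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT));
}

static void example_hpt_free(struct page *page, unsigned int order)
{
	kvm_free_hpt_cma(page, 1ul << (order - PAGE_SHIFT));
}
#endif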

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the memblock allocator
 * has been activated and all other subsystems have already
 * allocated/reserved memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We need the CMA reservation only when we are in HV mode.
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma",
			&kvm_cma);
	}
}
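
/*
 * Worked example of the sizing above: on a host with 64 GiB of memory
 * and the default kvm_cma_resv_ratio of 5, selected_size comes to
 * 64 GiB * 5 / 100 = 3.2 GiB (modulo integer division on the page
 * count), which is then declared as one aligned CMA region.
 */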

/*
 * Real-mode H_CONFER implementation.
 * We mark ourselves as conferring and then spin for up to 10us (or
 * until the vcore starts exiting).  If at any point all the threads
 * that entered the guest have ceded or are conferring, we return
 * H_TOO_HARD so that the hypercall is redone in virtual mode, where
 * the yield can actually be performed.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}
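
/*
 * Usage note (the callers live outside this file): platform code such
 * as the powernv subcore management path checks kvm_hv_mode_active()
 * and refuses to change the core threading mode while any HV guest
 * exists, which is exactly what the counter above is for.
 */
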
extern int hcall_real_table[], hcall_real_table_end[];

int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	/* hcall numbers are multiples of 4 */
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);
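
/*
 * Example: H_IPI is hcall token 0x6c, so the check above looks at
 * hcall_real_table[0x6c / 4] == hcall_real_table[27]; a non-zero
 * entry there means the real-mode handler implements H_IPI.
 */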

int kvmppc_hwrng_present(void)
{
	return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
	int r;

	/* Only need to do the expensive mfmsr() on radix */
	if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
		r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]);
	else
		r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]);
	if (r)
		return H_SUCCESS;

	return H_HARDWARE;
}

/*
 * Send an interrupt or message to another CPU.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	void __iomem *xics_phys;
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* On POWER9 we can use msgsnd for any destination cpu. */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* We should never reach this on a XIVE system */
	if (WARN_ON_ONCE(xive_enabled()))
		return;

	/* Else poke the target with an IPI */
	xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys;
	if (xics_phys)
		__raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
	else
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
}

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	/*
	 * Order setting of exit map vs. msgsnd/IPI.  "active" is a
	 * bitmap of threads within the vcore, bit 0 = vc->pcpu.
	 */
	smp_mb();
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}

void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
	int me, ee, i, t;
	int cpu0;

	/*
	 * Set our bit in the threads-exiting-guest map in the 0xff00
	 * bits of vcore->entry_exit_map (the low 8 bits map the threads
	 * that have entered the guest; e.g. ptid 2 exiting sets 0x400).
	 */
	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

	/* Are we the first here? */
	if ((ee >> 8) != 0)
		return;

	/*
	 * Trigger the other threads in this vcore to exit the guest.
	 * If this is a hypervisor decrementer interrupt then they
	 * will be already on their way out of the guest.
	 */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

	/*
	 * If we are doing dynamic micro-threading, interrupt the other
	 * subcores to pull them out of their guests too.
	 */
	if (!sip)
		return;

	for (i = 0; i < MAX_SUBCORES; ++i) {
		vc = sip->vc[i];
		if (!vc)
			break;
		do {
			ee = vc->entry_exit_map;
			/* Already asked to exit? */
			if ((ee >> 8) != 0)
				break;
		} while (cmpxchg(&vc->entry_exit_map, ee,
				 ee | VCORE_EXIT_REQ) != ee);
		if ((ee >> 8) == 0)
			kvmhv_interrupt_vcore(vc, ee);
	}

	/*
	 * On POWER9 when running a HPT guest on a radix host (sip != NULL),
	 * we have to interrupt inactive CPU threads to get them to
	 * restore the host LPCR value.
	 */
	if (sip->lpcr_req) {
		if (cmpxchg(&sip->do_restore, 0, 1) == 0) {
			vc = local_paca->kvm_hstate.kvm_vcore;
			cpu0 = vc->pcpu + ptid - local_paca->kvm_hstate.tid;
			for (t = 1; t < threads_per_core; ++t) {
				if (sip->napped[t])
					kvmhv_rm_send_ipi(cpu0 + t);
			}
		}
	}
}

struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
					 u32 xisr)
{
	int i;

	/*
	 * We access the mapped array here without a lock.  That
	 * is safe because we never reduce the number of entries;
	 * instead we only add entries at the end, and if we have
	 * to take out entries we invalidate them (by setting
	 * r_hwirq to 0) rather than removing them, so a concurrent
	 * reader never sees a half-updated entry.  Hence if we find
	 * a matching r_hwirq here, the rest of the entry is valid
	 * once our reads are ordered against the writer's stores
	 * (see the smp_rmb() below).
	 */
	for (i = 0; i < pimap->n_mapped; i++) {
		if (xisr == pimap->mapped[i].r_hwirq) {
			/*
			 * Order subsequent reads in the caller to serialize
			 * with the writer.
			 */
			smp_rmb();
			return &pimap->mapped[i];
		}
	}
	return NULL;
}

/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA, it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	struct kvmppc_passthru_irqmap *pimap;
	struct kvmppc_irq_map *irq_map;
	struct kvm_vcpu *vcpu;

	vcpu = local_paca->kvm_hstate.kvm_vcpu;
	if (!vcpu)
		return 1;
	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
	if (!pimap)
		return 1;
	irq_map = get_irqmap(pimap, xisr);
	if (!irq_map)
		return 1;

	/* We're handling this interrupt, generic code doesn't need to */
	local_paca->kvm_hstate.saved_xirr = 0;

	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	return 1;
}
#endif

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 Passthrough that needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is PCI passthrough external interrupt that was handled
 */
static long kvmppc_read_one_intr(bool *again);

long kvmppc_read_intr(void)
{
	long ret = 0;
	long rc;
	bool again;

	if (xive_enabled())
		return 1;

	do {
		again = false;
		rc = kvmppc_read_one_intr(&again);
		if (rc && (ret == 0 || rc > ret))
			ret = rc;
	} while (again);
	return ret;
}

static long kvmppc_read_one_intr(bool *again)
{
	void __iomem *xics_phys;
	u32 h_xirr;
	__be32 xirr;
	u32 xisr;
	u8 host_ipi;
	int64_t rc;

	if (xive_enabled())
		return 1;

	/* see if a host IPI is pending */
	host_ipi = local_paca->kvm_hstate.host_ipi;
	if (host_ipi)
		return 1;

	/* Now read the interrupt from the ICP */
	xics_phys = local_paca->kvm_hstate.xics_phys;
	rc = 0;
	if (!xics_phys)
		rc = opal_int_get_xirr(&xirr, false);
	else
		xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
	if (rc < 0)
		return 1;

	/*
	 * Save XIRR for later. Since we get control in reverse endian
	 * on LE systems, save it byte reversed and fetch it back in
	 * host endian. Note that xirr is the value read from the
	 * XIRR register, while h_xirr is the host endian version.
	 */
	h_xirr = be32_to_cpu(xirr);
	local_paca->kvm_hstate.saved_xirr = h_xirr;
	xisr = h_xirr & 0xffffff;
	/*
	 * Ensure that the store/load complete to guarantee all side
	 * effects of loading from XIRR have completed.
	 */
	smp_mb();

	/* if nothing pending in the ICP */
	if (!xisr)
		return 0;

	/*
	 * If it is an IPI, clear the MFRR and EOI it.
	 */
	if (xisr == XICS_IPI) {
		rc = 0;
		if (xics_phys) {
			__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
			__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
		} else {
			opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
			rc = opal_int_eoi(h_xirr);
		}
		/* If rc > 0, there is another interrupt pending */
		*again = rc > 0;

		/*
		 * Need to ensure side effects of above stores
		 * complete before proceeding.
		 */
		smp_mb();

		/*
		 * We need to re-check host IPI now in case it got set in the
		 * meantime. If it's clear, we bounce the interrupt to the
		 * guest.
		 */
		host_ipi = local_paca->kvm_hstate.host_ipi;
		if (unlikely(host_ipi != 0)) {
			/*
			 * We raced with the host; we need to resend
			 * that IPI, bummer.
			 */
			if (xics_phys)
				__raw_rm_writeb(IPI_PRIORITY,
						xics_phys + XICS_MFRR);
			else
				opal_int_set_mfrr(hard_smp_processor_id(),
						  IPI_PRIORITY);
			/* Let side effects complete */
			smp_mb();
			return 1;
		}

		/* OK, it's an IPI for us */
		local_paca->kvm_hstate.saved_xirr = 0;
		return -1;
	}

	return kvmppc_check_passthru(xisr, xirr, again);
}

#ifdef CONFIG_KVM_XICS
/* true if we are executing in real mode (data relocation off) */
static inline bool is_rm(void)
{
	return !(mfmsr() & MSR_DR);
}

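/*
 * Each hcall wrapper below dispatches one of three ways: to the
 * real-mode XIVE handler if we are in real mode on a XIVE system, to
 * the virtual-mode XIVE handler that the (possibly not yet loaded)
 * XIVE module publishes through the __xive_vm_h_* hooks at the top of
 * this file, or to the real-mode XICS handler otherwise.
 */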
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_xirr(vcpu);
		if (unlikely(!__xive_vm_h_xirr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_xirr(vcpu);
	} else
		return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	/* H_XIRR_X also returns the current timebase in GPR5 */
	vcpu->arch.regs.gpr[5] = get_tb();
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_xirr(vcpu);
		if (unlikely(!__xive_vm_h_xirr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_xirr(vcpu);
	} else
		return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_ipoll(vcpu, server);
		if (unlikely(!__xive_vm_h_ipoll))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_ipoll(vcpu, server);
	} else
		return H_TOO_HARD;
}

int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_ipi(vcpu, server, mfrr);
		if (unlikely(!__xive_vm_h_ipi))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_ipi(vcpu, server, mfrr);
	} else
		return xics_rm_h_ipi(vcpu, server, mfrr);
}

int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_cppr(vcpu, cppr);
		if (unlikely(!__xive_vm_h_cppr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_cppr(vcpu, cppr);
	} else
		return xics_rm_h_cppr(vcpu, cppr);
}

int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_eoi(vcpu, xirr);
		if (unlikely(!__xive_vm_h_eoi))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_eoi(vcpu, xirr);
	} else
		return xics_rm_h_eoi(vcpu, xirr);
}
#endif

void kvmppc_bad_interrupt(struct pt_regs *regs)
{
	/*
	 * System reset (0x100) and machine check (0x200) get passed to
	 * their normal handlers first (with NMI accounting for the
	 * former); anything else is a bug in the KVM entry/exit code.
	 * Either way this is fatal, hence the panic below.
	 */
	if (TRAP(regs) == 0x100) {
		get_paca()->in_nmi++;
		system_reset_exception(regs);
		get_paca()->in_nmi--;
	} else if (TRAP(regs) == 0x200) {
		machine_check_exception(regs);
	} else {
		die("Bad interrupt in KVM entry/exit code", regs, SIGABRT);
	}
	panic("Bad KVM trap");
}

/*
 * Thread synchronization used when switching to and from a secondary
 * LPCR/LPIDR value, i.e. when running a HPT guest on a radix POWER9
 * host in split mode.  Each thread advances through the phases below,
 * and wait_for_sync() spins until all threads of the core have
 * reached the given phase.
 */
#define PHASE_REALMODE		1	/* in real mode with interrupts off */
#define PHASE_SET_LPCR		2	/* have set LPCR */
#define PHASE_OUT_OF_GUEST	4	/* exited guest */
#define PHASE_RESET_LPCR	8	/* have reset LPCR to host value */

#define ALL(p)		(((p) << 24) | ((p) << 16) | ((p) << 8) | (p))
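
/*
 * ALL() replicates a phase byte across all four per-thread slots: for
 * example ALL(PHASE_REALMODE) == 0x01010101.  The per-thread phase[]
 * bytes and the u32 allphases read in wait_for_sync() overlay the same
 * word in struct kvm_split_mode, so comparing allphases against ALL(p)
 * means "every thread has reached phase p".
 */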

static void wait_for_sync(struct kvm_split_mode *sip, int phase)
{
	int thr = local_paca->kvm_hstate.tid;

	sip->lpcr_sync.phase[thr] |= phase;
	phase = ALL(phase);
	while ((sip->lpcr_sync.allphases & phase) != phase) {
		HMT_low();
		barrier();
	}
	HMT_medium();
}

void kvmhv_p9_set_lpcr(struct kvm_split_mode *sip)
{
	unsigned long rb, set;

	/* wait for every other thread to get to real mode */
	wait_for_sync(sip, PHASE_REALMODE);

	/* Set LPCR and LPIDR */
	mtspr(SPRN_LPCR, sip->lpcr_req);
	mtspr(SPRN_LPID, sip->lpidr_req);
	isync();

	/* Invalidate the TLB on thread 0 */
	if (local_paca->kvm_hstate.tid == 0) {
		sip->do_set = 0;
		asm volatile("ptesync" : : : "memory");
		for (set = 0; set < POWER9_TLB_SETS_RADIX; ++set) {
			rb = TLBIEL_INVAL_SET_LPID +
				(set << TLBIEL_INVAL_SET_SHIFT);
			asm volatile(PPC_TLBIEL(%0, %1, 0, 0, 0) : :
				     "r" (rb), "r" (0));
		}
		asm volatile("ptesync" : : : "memory");
	}

	/* indicate that we have done so and wait for others */
	wait_for_sync(sip, PHASE_SET_LPCR);
	/* order subsequent reads against the phase sync above */
	smp_rmb();
}

/*
 * Called when a thread that has been in the guest needs to reload the
 * host LPCR/LPIDR value, i.e. on exit from a HPT guest running in
 * split mode on a radix host.
 */
void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip)
{
	/* wait for every other thread to be out of the guest */
	wait_for_sync(sip, PHASE_OUT_OF_GUEST);

	/* Restore the host LPCR and LPIDR */
	mtspr(SPRN_LPID, 0);
	mtspr(SPRN_LPCR, sip->host_lpcr);
	isync();

	/* Let thread 0 clear do_restore for the next time round */
	if (local_paca->kvm_hstate.tid == 0) {
		sip->do_restore = 0;
		smp_wmb();	/* order the store vs. the phase update */
	}

	wait_for_sync(sip, PHASE_RESET_LPCR);
	smp_mb();
	local_paca->kvm_hstate.kvm_split_mode = NULL;
}