#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/asm-prototypes.h>
#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/smp.h>

#define KVM_CMA_CHUNK_ORDER	18

#include "book3s_xics.h"
#include "book3s_xive.h"
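/*
 * The XIVE module will populate these when it loads; real-mode code
 * calls through these hooks to reach the virtual-mode XIVE handlers.
 */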
unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
		       unsigned long mfrr);
int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);
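/*
 * Hash page table alignment on newer cpus (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */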
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT)

static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);

struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
			 false);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);

void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);
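/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the memblock allocator has been
 * activated and all other subsystems have already allocated/reserved
 * memory.
 */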
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	phys_addr_t selected_size;

	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;

	selected_size = PAGE_ALIGN(memblock_phys_mem_size() * kvm_cma_resv_ratio / 100);
	if (selected_size) {
		pr_info("%s: reserving %ld MiB for global area\n", __func__,
			(unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma",
			&kvm_cma);
	}
}
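/*
 * Real-mode H_CONFER implementation.
 * We check whether we are the only vcpu of this virtual core still
 * running in the guest and not ceded.  If so, we return H_TOO_HARD so
 * the caller pops up to the virtual-mode implementation; if not, just
 * return to the guest.
 */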
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS;

	set_bit(ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD;
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}
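/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */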
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];

int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

int kvmppc_hwrng_present(void)
{
	return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
	int r;

	if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
		r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]);
	else
		r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]);
	if (r)
		return H_SUCCESS;

	return H_HARDWARE;
}
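/*
 * Send an interrupt or message to another CPU.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */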
void kvmhv_rm_send_ipi(int cpu)
{
	void __iomem *xics_phys;
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	if (kvmhv_on_pseries()) {
		unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

		plpar_hcall_raw(H_IPI, retbuf, get_hard_smp_processor_id(cpu),
				IPI_PRIORITY);
		return;
	}

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	if (WARN_ON_ONCE(xics_on_xive()))
		return;

	xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys;
	if (xics_phys)
		__raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
	else
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
}
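/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */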
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	smp_mb();
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}

void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
	int me, ee, i, t;
	int cpu0;

	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

	if ((ee >> 8) != 0)
		return;

	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

	if (!sip)
		return;

	for (i = 0; i < MAX_SUBCORES; ++i) {
		vc = sip->vc[i];
		if (!vc)
			break;
		do {
			ee = vc->entry_exit_map;
			if ((ee >> 8) != 0)
				break;
		} while (cmpxchg(&vc->entry_exit_map, ee,
				 ee | VCORE_EXIT_REQ) != ee);
		if ((ee >> 8) == 0)
			kvmhv_interrupt_vcore(vc, ee);
	}

	if (sip->lpcr_req) {
		if (cmpxchg(&sip->do_restore, 0, 1) == 0) {
			vc = local_paca->kvm_hstate.kvm_vcore;
			cpu0 = vc->pcpu + ptid - local_paca->kvm_hstate.tid;
			for (t = 1; t < threads_per_core; ++t) {
				if (sip->napped[t])
					kvmhv_rm_send_ipi(cpu0 + t);
			}
		}
	}
}
struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
					 u32 xisr)
{
	int i;
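	/*
	 * We access the mapped array here without a lock.  That
	 * is safe because we never reduce the number of entries
	 * in the array and we never change the v_hwirq field of
	 * an entry once it is set.
	 *
	 * We have also carefully ordered the stores in the writer
	 * and the loads here in the reader, so that if we find a matching
	 * hwirq here, the associated GSI and irq_desc fields are valid.
	 */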
	for (i = 0; i < pimap->n_mapped; i++) {
		if (xisr == pimap->mapped[i].r_hwirq) {
			smp_rmb();
			return &pimap->mapped[i];
		}
	}
	return NULL;
}
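/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA, it will be picked up by
 * the host ICP driver.
 */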
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	struct kvmppc_passthru_irqmap *pimap;
	struct kvmppc_irq_map *irq_map;
	struct kvm_vcpu *vcpu;

	vcpu = local_paca->kvm_hstate.kvm_vcpu;
	if (!vcpu)
		return 1;
	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
	if (!pimap)
		return 1;
	irq_map = get_irqmap(pimap, xisr);
	if (!irq_map)
		return 1;

	local_paca->kvm_hstate.saved_xirr = 0;

	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	return 1;
}
#endif
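/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 Passthrough that needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is PCI passthrough external interrupt that was handled
 */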
static long kvmppc_read_one_intr(bool *again);

long kvmppc_read_intr(void)
{
	long ret = 0;
	long rc;
	bool again;

	if (xive_enabled())
		return 1;

	do {
		again = false;
		rc = kvmppc_read_one_intr(&again);
		if (rc && (ret == 0 || rc > ret))
			ret = rc;
	} while (again);
	return ret;
}
static long kvmppc_read_one_intr(bool *again)
{
	void __iomem *xics_phys;
	u32 h_xirr;
	__be32 xirr;
	u32 xisr;
	u8 host_ipi;
	int64_t rc;

	if (xive_enabled())
		return 1;

	host_ipi = local_paca->kvm_hstate.host_ipi;
	if (host_ipi)
		return 1;

	if (kvmhv_on_pseries()) {
		unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

		rc = plpar_hcall_raw(H_XIRR, retbuf, 0xFF);
		xirr = cpu_to_be32(retbuf[0]);
	} else {
		xics_phys = local_paca->kvm_hstate.xics_phys;
		rc = 0;
		if (!xics_phys)
			rc = opal_int_get_xirr(&xirr, false);
		else
			xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
	}
	if (rc < 0)
		return 1;

	h_xirr = be32_to_cpu(xirr);
	local_paca->kvm_hstate.saved_xirr = h_xirr;
	xisr = h_xirr & 0xffffff;

	smp_mb();

	if (!xisr)
		return 0;

	if (xisr == XICS_IPI) {
		rc = 0;
		if (kvmhv_on_pseries()) {
			unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

			plpar_hcall_raw(H_IPI, retbuf,
					hard_smp_processor_id(), 0xff);
			plpar_hcall_raw(H_EOI, retbuf, h_xirr);
		} else if (xics_phys) {
			__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
			__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
		} else {
			opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
			rc = opal_int_eoi(h_xirr);
		}

		*again = rc > 0;

		smp_mb();

		host_ipi = local_paca->kvm_hstate.host_ipi;
		if (unlikely(host_ipi != 0)) {
			if (kvmhv_on_pseries()) {
				unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

				plpar_hcall_raw(H_IPI, retbuf,
						hard_smp_processor_id(),
						IPI_PRIORITY);
			} else if (xics_phys)
				__raw_rm_writeb(IPI_PRIORITY,
						xics_phys + XICS_MFRR);
			else
				opal_int_set_mfrr(hard_smp_processor_id(),
						  IPI_PRIORITY);

			smp_mb();
			return 1;
		}

		local_paca->kvm_hstate.saved_xirr = 0;
		return -1;
	}

	return kvmppc_check_passthru(xisr, xirr, again);
}
#ifdef CONFIG_KVM_XICS
static inline bool is_rm(void)
{
	return !(mfmsr() & MSR_DR);
}

unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_xirr(vcpu);
		if (unlikely(!__xive_vm_h_xirr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_xirr(vcpu);
	} else
		return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	vcpu->arch.regs.gpr[5] = get_tb();
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_xirr(vcpu);
		if (unlikely(!__xive_vm_h_xirr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_xirr(vcpu);
	} else
		return xics_rm_h_xirr(vcpu);
}
unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_ipoll(vcpu, server);
		if (unlikely(!__xive_vm_h_ipoll))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_ipoll(vcpu, server);
	} else
		return H_TOO_HARD;
}

int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_ipi(vcpu, server, mfrr);
		if (unlikely(!__xive_vm_h_ipi))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_ipi(vcpu, server, mfrr);
	} else
		return xics_rm_h_ipi(vcpu, server, mfrr);
}
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_cppr(vcpu, cppr);
		if (unlikely(!__xive_vm_h_cppr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_cppr(vcpu, cppr);
	} else
		return xics_rm_h_cppr(vcpu, cppr);
}

int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_eoi(vcpu, xirr);
		if (unlikely(!__xive_vm_h_eoi))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_eoi(vcpu, xirr);
	} else
		return xics_rm_h_eoi(vcpu, xirr);
}
#endif
void kvmppc_bad_interrupt(struct pt_regs *regs)
{
	if (TRAP(regs) == 0x100) {
		get_paca()->in_nmi++;
		system_reset_exception(regs);
		get_paca()->in_nmi--;
	} else if (TRAP(regs) == 0x200) {
		machine_check_exception(regs);
	} else {
		die("Bad interrupt in KVM entry/exit code", regs, SIGABRT);
	}
	panic("Bad KVM trap");
}
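/*
 * Phases used to synchronize the threads of a core when switching the
 * LPCR and LPIDR across the whole core (POWER9 split mode).  Each thread
 * records the phases it has reached; wait_for_sync() spins until every
 * thread in the core has reached the given phase.
 */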
#define PHASE_REALMODE		1
#define PHASE_SET_LPCR		2
#define PHASE_OUT_OF_GUEST	4
#define PHASE_RESET_LPCR	8

#define ALL(p)	(((p) << 24) | ((p) << 16) | ((p) << 8) | (p))

static void wait_for_sync(struct kvm_split_mode *sip, int phase)
{
	int thr = local_paca->kvm_hstate.tid;

	sip->lpcr_sync.phase[thr] |= phase;
	phase = ALL(phase);
	while ((sip->lpcr_sync.allphases & phase) != phase) {
		HMT_low();
		barrier();
	}
	HMT_medium();
}

void kvmhv_p9_set_lpcr(struct kvm_split_mode *sip)
{
	unsigned long rb, set;

	wait_for_sync(sip, PHASE_REALMODE);

	mtspr(SPRN_LPCR, sip->lpcr_req);
	mtspr(SPRN_LPID, sip->lpidr_req);
	isync();

	if (local_paca->kvm_hstate.tid == 0) {
		sip->do_set = 0;
		asm volatile("ptesync" : : : "memory");
		for (set = 0; set < POWER9_TLB_SETS_RADIX; ++set) {
			rb = TLBIEL_INVAL_SET_LPID +
				(set << TLBIEL_INVAL_SET_SHIFT);
			asm volatile(PPC_TLBIEL(%0, %1, 0, 0, 0) : :
				     "r" (rb), "r" (0));
		}
		asm volatile("ptesync" : : : "memory");
	}

	wait_for_sync(sip, PHASE_SET_LPCR);

	smp_rmb();
}
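/*
 * Called when a thread that has been in the guest needs to restore the
 * host LPCR and LPIDR values; all threads in the core synchronize on
 * the way out before split mode is torn down.
 */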
void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip)
{
	wait_for_sync(sip, PHASE_OUT_OF_GUEST);

	mtspr(SPRN_LPID, 0);
	mtspr(SPRN_LPCR, sip->host_lpcr);
	isync();

	if (local_paca->kvm_hstate.tid == 0) {
		sip->do_restore = 0;
		smp_wmb();
	}

	wait_for_sync(sip, PHASE_RESET_LPCR);
	smp_mb();
	local_paca->kvm_hstate.kvm_split_mode = NULL;
}

static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
}

void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
		msr &= ~MSR_TS_MASK;
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr_hv);

static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	unsigned long msr, pc, new_msr, new_pc;

	msr = kvmppc_get_msr(vcpu);
	pc = kvmppc_get_pc(vcpu);
	new_msr = vcpu->arch.intr_msr;
	new_pc = vec;

	if (MSR_TM_TRANSACTIONAL(msr))
		new_msr |= MSR_TS_S;
	else
		new_msr |= msr & MSR_TS_MASK;

	if (vec != BOOK3S_INTERRUPT_SYSTEM_RESET &&
	    vec != BOOK3S_INTERRUPT_MACHINE_CHECK &&
	    (vcpu->arch.vcore->lpcr & LPCR_AIL) == LPCR_AIL_3 &&
	    (msr & (MSR_IR|MSR_DR)) == (MSR_IR|MSR_DR)) {
		new_msr |= MSR_IR | MSR_DR;
		new_pc += 0xC000000000004000ULL;
	}

	kvmppc_set_srr0(vcpu, pc);
	kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
	kvmppc_set_pc(vcpu, new_pc);
	vcpu->arch.shregs.msr = new_msr;
}

void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	inject_interrupt(vcpu, vec, srr1_flags);
	kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_inject_interrupt_hv);
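/*
 * Is there a PRIV_DOORBELL pending for the guest (on POWER9)?
 * Can we inject a Decrementer or an External interrupt?
 */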
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
{
	int ext;
	unsigned long lpcr;

	ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1;
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= ext << LPCR_MER_SH;
	mtspr(SPRN_LPCR, lpcr);
	isync();

	if (vcpu->arch.shregs.msr & MSR_EE) {
		if (ext) {
			inject_interrupt(vcpu, BOOK3S_INTERRUPT_EXTERNAL, 0);
		} else {
			long int dec = mfspr(SPRN_DEC);
			if (!(lpcr & LPCR_LD))
				dec = (int) dec;
			if (dec < 0)
				inject_interrupt(vcpu,
					BOOK3S_INTERRUPT_DECREMENTER, 0);
		}
	}

	if (vcpu->arch.doorbell_request) {
		mtspr(SPRN_DPDES, 1);
		vcpu->arch.vcore->dpdes = 1;
		smp_wmb();
		vcpu->arch.doorbell_request = 0;
	}
}
static void flush_guest_tlb(struct kvm *kvm)
{
	unsigned long rb, set;

	rb = PPC_BIT(52);
	if (kvm_is_radix(kvm)) {
		asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
			     : : "r" (rb), "i" (1), "i" (1), "i" (2),
			       "r" (0) : "memory");
		for (set = 1; set < kvm->arch.tlb_sets; ++set) {
			rb += PPC_BIT(51);
			asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
				     : : "r" (rb), "i" (1), "i" (1), "i" (0),
				       "r" (0) : "memory");
		}
		asm volatile("ptesync": : :"memory");
		asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
	} else {
		for (set = 0; set < kvm->arch.tlb_sets; ++set) {
			asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
				     : : "r" (rb), "i" (0), "i" (0), "i" (0),
				       "r" (0) : "memory");
			rb += PPC_BIT(51);
		}
		asm volatile("ptesync": : :"memory");
		asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
	}
}
void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
				 struct kvm_nested_guest *nested)
{
	cpumask_t *need_tlb_flush;
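	/*
	 * On POWER9, individual threads can come in here, but the
	 * TLB is shared between the 4 threads in a core, hence
	 * invalidating on one thread invalidates for all.
	 * Thus we make all 4 threads use the same bit.
	 */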
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		pcpu = cpu_first_thread_sibling(pcpu);

	if (nested)
		need_tlb_flush = &nested->need_tlb_flush;
	else
		need_tlb_flush = &kvm->arch.need_tlb_flush;

	if (cpumask_test_cpu(pcpu, need_tlb_flush)) {
		flush_guest_tlb(kvm);

		cpumask_clear_cpu(pcpu, need_tlb_flush);
	}
}
EXPORT_SYMBOL_GPL(kvmppc_check_need_tlb_flush);