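/*
 * Real-mode XICS interrupt controller emulation for Book3S HV KVM.
 *
 * These routines run in real mode (MMU off), so they only touch data
 * that is safe to access there and return H_TOO_HARD whenever a
 * request has to be completed by the virtual-mode XICS code instead.
 */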
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/kernel_stat.h>

#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/synch.h>
#include <asm/cputhreads.h>
#include <asm/pgtable.h>
#include <asm/ppc-opcode.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
#include <asm/smp.h>

#include "book3s_xics.h"

#define DEBUG_PASSUP

/*
 * Tunables shared with the rest of HV KVM: redirect vcpu-kick IPIs to a
 * free host core, and allow passthrough (IRQ bypass) handling.
 */
int h_ipi_redirect = 1;
EXPORT_SYMBOL(h_ipi_redirect);
int kvm_irq_bypass = 1;
EXPORT_SYMBOL(kvm_irq_bypass);

static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			       u32 new_irq, bool check_resend);
static int xics_opal_set_server(unsigned int hw_irq, int server_cpu);

/* -- ICS routines -- */
static void ics_rm_check_resend(struct kvmppc_xics *xics,
				struct kvmppc_ics *ics, struct kvmppc_icp *icp)
{
	int i;

	/* Re-attempt delivery of every source flagged for resend */
	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];
		if (state->resend)
			icp_rm_deliver_irq(xics, icp, state->number, true);
	}
}

/* -- ICP routines -- */

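/*
 * Send a real-mode "host action" message to the first thread of the given
 * host core: stash the vcpu to kick in rm_data, set the muxed IPI message
 * and the host_ipi flag, then raise the IPI.  The full barrier orders the
 * rm_data and message stores before the IPI is sent.
 */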
#ifdef CONFIG_SMP
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu)
{
	int hcpu;

	hcpu = hcore << threads_shift;
	kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu;
	smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION);
	kvmppc_set_host_ipi(hcpu, 1);
	smp_mb();
	kvmhv_rm_send_ipi(hcpu);
}
#else
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) { }
#endif

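/*
 * Scan the host cores in the range (start, max) for one that is idle in
 * the host and has no real-mode action pending, and atomically claim it
 * for @action.  Returns the core number, or -1 if none could be grabbed.
 */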
static inline int grab_next_hostcore(int start,
		struct kvmppc_host_rm_core *rm_core, int max, int action)
{
	bool success;
	int core;
	union kvmppc_rm_state old, new;

	for (core = start + 1; core < max; core++) {
		old = new = READ_ONCE(rm_core[core].rm_state);

		if (!old.in_host || old.rm_action)
			continue;

		/* Try to grab this host core if not taken already. */
		new.rm_action = action;

		success = cmpxchg64(&rm_core[core].rm_state.raw,
						old.raw, new.raw) == old.raw;
		if (success) {
			/*
			 * Make sure the store to rm_action is visible before
			 * we return to the caller (and before the subsequent
			 * store to rm_data) to synchronize with the IPI
			 * handler.
			 */
			smp_wmb();
			return core;
		}
	}

	return -1;
}

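/*
 * Find a host core to post the action to: search from the core after the
 * current one to the end of the map, then wrap around from core 0 back up
 * to (but excluding) the current core.
 */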
static inline int find_available_hostcore(int action)
{
	int core;
	int my_core = smp_processor_id() >> threads_shift;
	struct kvmppc_host_rm_core *rm_core = kvmppc_host_rm_ops_hv->rm_core;

	core = grab_next_hostcore(my_core, rm_core, cpu_nr_cores(), action);
	if (core == -1)
		core = grab_next_hostcore(core, rm_core, my_core, action);

	return core;
}

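/*
 * Flag an external interrupt as pending for the target vcpu and make sure
 * it notices: set MER if we are kicking ourselves, otherwise IPI the
 * physical thread the vcpu is running on, or fall back to redirecting the
 * kick to another host core / deferring it to virtual mode.
 */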
static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *this_vcpu)
{
	struct kvmppc_icp *this_icp = this_vcpu->arch.icp;
	int cpu;
	int hcore;

	/* Mark the target VCPU as having an interrupt pending */
	vcpu->stat.queue_intr++;
	set_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);

	/* Kick self ? Just set MER and return */
	if (vcpu == this_vcpu) {
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_MER);
		return;
	}

	/*
	 * Check if the core is loaded; if not, find an available host core
	 * to post to wake the VCPU.  If we can't find one, set up state to
	 * eventually return too hard so the kick is done in virtual mode.
	 */
	cpu = vcpu->arch.thread_cpu;
	if (cpu < 0 || cpu >= nr_cpu_ids) {
		hcore = -1;
		if (kvmppc_host_rm_ops_hv && h_ipi_redirect)
			hcore = find_available_hostcore(XICS_RM_KICK_VCPU);
		if (hcore != -1) {
			icp_send_hcore_msg(hcore, vcpu);
		} else {
			this_icp->rm_action |= XICS_RM_KICK_VCPU;
			this_icp->rm_kick_target = vcpu;
		}
		return;
	}

	smp_mb();
	kvmhv_rm_send_ipi(cpu);
}

static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
{
	/* Note: only called on self, so clearing MER is safe here */
	clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
		  &vcpu->arch.pending_exceptions);
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_MER);
}

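/*
 * Attempt to atomically swap the ICP state from @old to @new, recomputing
 * the EE output line first.  On success, kick the target vcpu if the
 * interrupt output is now asserted and record the new state for debugging.
 * Returns false if another CPU changed the state under us.
 */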
static inline bool icp_rm_try_update(struct kvmppc_icp *icp,
				     union kvmppc_icp_state old,
				     union kvmppc_icp_state new)
{
	struct kvm_vcpu *this_vcpu = local_paca->kvm_hstate.kvm_vcpu;
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	/*
	 * Check for output state update.
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already.  This is why we never clear the interrupt
	 * output here; we only ever set it.  The clear only happens prior
	 * to doing an update, and only by the processor itself, currently
	 * in H_XIRR (accept) and H_CPPR (when raising the priority).
	 *
	 * We also do not try to figure out whether the EE state has
	 * changed; we unconditionally set it if the new state calls for
	 * it, since the pending-interrupt flag is opportunistically
	 * removed when raising the CPPR and may need to be put back.
	 */
	if (new.out_ee)
		icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu);

	/* Expose the state change for debug purposes */
	this_vcpu->arch.icp->rm_dbgstate = new;
	this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu;

 bail:
	return success;
}

static inline int check_too_hard(struct kvmppc_xics *xics,
				 struct kvmppc_icp *icp)
{
	return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS;
}

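/*
 * Walk the resend bitmap in the ICP and re-attempt delivery for every ICS
 * that has interrupts flagged for resend.
 */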
static void icp_rm_check_resend(struct kvmppc_xics *xics,
				struct kvmppc_icp *icp)
{
	u32 icsid;

	/* Pairs with the barriers in icp_rm_deliver_irq() when flagging a resend */
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_rm_check_resend(xics, ics, icp);
	}
}

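/*
 * Try to record @irq as the pending interrupt in the ICP if its priority
 * is more favored than the current CPPR, MFRR and pending priority.  On
 * success, *reject returns the interrupt that was displaced (if any); on
 * failure, the ICP's need_resend flag is set.  Returns true on success.
 */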
static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
				  u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	do {
		old_state = new_state = READ_ONCE(icp->state);

		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			new_state.mfrr > priority &&
			new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery.
		 */
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver, set need_resend so a
			 * subsequent CPPR state change causes us to try a
			 * new delivery.
			 */
			new_state.need_resend = true;
		}

	} while (!icp_rm_try_update(icp, old_state, new_state));

	return success;
}

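/*
 * Deliver (or re-deliver) an interrupt to an ICP.  This is used both for
 * initial delivery and for pushing a previously rejected interrupt back
 * in, so it may loop: a successful delivery can itself displace another
 * source, which then has to go through the same path again.  If the
 * target cannot take the interrupt, the source is flagged for resend.
 */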
static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			       u32 new_irq, bool check_resend)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;

	/*
	 * Rejection can be racy vs. resends.  We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete, so
	 * potentially the ICP can already accept the interrupt again.  So
	 * we need to retry the delivery; essentially the reject path boils
	 * down to a failed delivery.  The interrupt could also have moved
	 * to a different target, so we may need to re-do the ICP lookup.
	 */
 again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		/* Unsafe increment, but this does not need to be accurate */
		xics->err_noics++;
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	arch_spin_lock(&ics->lock);

	/* Get our server */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			/* Unsafe increment again */
			xics->err_noicp++;
			goto out;
		}
	}

	/* On a resend, only deliver if the source is still flagged */
	if (check_resend)
		if (!state->resend)
			goto out;

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, bail out.  We remember the interrupt in
	 * masked_pending so it is not lost; it will be delivered when the
	 * source is unmasked.  Resends are treated the same way as first
	 * deliveries here, so that a rejected interrupt cannot be lost
	 * while masked.
	 */
	if (state->priority == MASKED) {
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery; this will set the need_resend flag in the ICP
	 * as part of the atomic transaction if the delivery is not
	 * possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ICS spin lock.  In that case we do the whole sequence all over
	 * again for the rejected interrupt: we cannot assume it is less
	 * favored than the new one, because by the time we exit
	 * icp_rm_try_to_deliver() the target processor may well have
	 * already consumed and completed the new one, making the rejected
	 * interrupt acceptable again.
	 */
	if (icp_rm_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/*
		 * Delivery was successful, did we reject somebody else ?
		 */
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			icp->n_reject++;
			new_irq = reject;
			check_resend = 0;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver; set the resend map bit and mark
		 * the ICS state as needing a resend.
		 */
		state->resend = 1;

		/*
		 * Make sure that when checking resend we don't miss it if
		 * the resend_map bit is seen and cleared.
		 */
		smp_wmb();
		set_bit(ics->icsid, icp->resend_map);

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between the icp_rm_try_to_deliver() atomic update and
		 * now, then we know the ICP should be able to accept the
		 * new IRQ delivery, so let's try again.
		 */
		smp_mb();
		if (!icp->state.need_resend) {
			state->resend = 0;
			arch_spin_unlock(&ics->lock);
			check_resend = 0;
			goto again;
		}
	}
 out:
	arch_spin_unlock(&ics->lock);
}

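/*
 * "Down_CPPR": move the CPPR to a less favored (numerically larger) value.
 * This may make the pending IPI (MFRR) deliverable, and it is also the
 * point where deferred resends are picked up, since lowering the priority
 * is what can make previously rejected interrupts acceptable again.
 */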
static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			     u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	/*
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with the new (less favored) value.  If the pending
	 * IPI (MFRR) is now more favored than both the new CPPR and the
	 * currently pending priority, make the IPI the pending interrupt.
	 * Externals that were rejected while the CPPR was more favored
	 * are picked up afterwards through the resend machinery.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point: either the pending
		 * interrupt is already an IPI (in which case we don't
		 * care to override it), or it's more favored than us,
		 * or non existent.
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/*
	 * Now handle resend checks.  Those are asynchronous to the ICP
	 * state update, so we can handle them outside of the atomic
	 * transaction.
	 */
	if (resend) {
		icp->n_check_resend++;
		icp_rm_check_resend(xics, icp);
	}
}

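/*
 * H_XIRR: accept the most favored pending interrupt.  Returns the XIRR
 * (old CPPR in the top byte, XISR below) in GPR4, raises the CPPR to the
 * priority of the accepted interrupt, and clears the pending state.
 */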
unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/* First clear the interrupt output (MER) */
	icp_rm_clr_vcpu_irq(icp->vcpu);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the current
	 * CPPR, then clear the XISR and set the CPPR to the pending
	 * priority.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/* Return the result in GPR4 */
	vcpu->arch.gpr[4] = xirr;

	return check_too_hard(xics, icp);
}

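/*
 * H_IPI: set the MFRR of the target server.  If the new MFRR is more
 * favored than the target's CPPR and current pending interrupt, the IPI
 * becomes the pending interrupt and any displaced external is re-delivered.
 * Raising the MFRR back up may also trigger a resend scan.
 */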
int xics_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		  unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp, *this_icp = vcpu->arch.icp;
	u32 reject;
	bool resend;
	bool local;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	local = this_icp->server_num == server;
	if (local)
		icp = this_icp;
	else
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
	if (!icp)
		return H_PARAMETER;

	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, nothing needs
	 * to be rejected as there can be no XISR to reject.  If the CPPR
	 * is less favored, we might be replacing the pending interrupt
	 * with the IPI, in which case the displaced interrupt has to be
	 * rejected back to its ICS.
	 *
	 * If the MFRR is being made less favored than before, a
	 * previously rejected interrupt might now be deliverable, so
	 * trigger a resend scan in that case.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_rm_try_update(icp, old_state, new_state));

	/* Handle reject in real mode */
	if (reject && reject != XICS_IPI) {
		this_icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, reject, false);
	}

	/* Handle resends in real mode */
	if (resend) {
		this_icp->n_check_resend++;
		icp_rm_check_resend(xics, icp);
	}

	return check_too_hard(xics, this_icp);
}

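/*
 * H_CPPR: set the processor's current priority.  Lowering the priority
 * (numerically raising CPPR) is handled by icp_rm_down_cppr(); raising it
 * may force the currently pending interrupt to be rejected back to its ICS.
 */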
int xics_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current value
	 * outside of the transaction as the CPPR is only ever changed
	 * by the processor on itself.
	 */
	if (cppr > icp->state.cppr) {
		icp_rm_down_cppr(xics, icp, cppr);
		goto bail;
	} else if (cppr == icp->state.cppr)
		return H_SUCCESS;

	/*
	 * ICP State: Up_CPPR
	 *
	 * The processor is raising its priority; this can result in a
	 * rejection of the pending interrupt.  We clear the interrupt
	 * output (MER) first; icp_rm_try_update() will assert it again
	 * if something deliverable remains pending.
	 */
	icp_rm_clr_vcpu_irq(icp->vcpu);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/*
	 * Check for rejects.  They are handled by doing a new delivery
	 * attempt (see comments in icp_rm_deliver_irq()).
	 */
	if (reject && reject != XICS_IPI) {
		icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, reject, false);
	}
 bail:
	return check_too_hard(xics, icp);
}

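/*
 * Source-side EOI handling: update the P/Q state (LSIs keep it, MSIs shift
 * Q into P), re-deliver the interrupt if it is still presented, notify any
 * host-side EOI listeners via the rm_action mechanism, and for
 * passed-through interrupts optionally retarget the hardware IRQ to the
 * current core.
 */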
static int ics_rm_eoi(struct kvm_vcpu *vcpu, u32 irq)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	u32 pq_old, pq_new;

	/*
	 * ICS EOI handling: for an LSI, if the P bit is still set, we
	 * need to resend it.  For an MSI, we move the Q bit into P (and
	 * clear Q); if it is then set, resend it.
	 */
	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		goto bail;

	state = &ics->irq_state[src];

	if (state->lsi)
		pq_new = state->pq_state;
	else
		do {
			pq_old = state->pq_state;
			pq_new = pq_old >> 1;
		} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);

	if (pq_new & PQ_PRESENTED)
		icp_rm_deliver_irq(xics, NULL, irq, false);

	if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
		icp->rm_action |= XICS_RM_NOTIFY_EOI;
		icp->rm_eoied_irq = irq;
	}

	/* Handle passed-through interrupts */
	if (state->host_irq) {
		++vcpu->stat.pthru_all;
		if (state->intr_cpu != -1) {
			int pcpu = raw_smp_processor_id();

			pcpu = cpu_first_thread_sibling(pcpu);
			++vcpu->stat.pthru_host;
			if (state->intr_cpu != pcpu) {
				++vcpu->stat.pthru_bad_aff;
				xics_opal_set_server(state->host_irq, pcpu);
			}
			state->intr_cpu = -1;
		}
	}

 bail:
	return check_too_hard(xics, icp);
}

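/*
 * H_EOI: per PAPR this both lowers the CPPR to the value in the XIRR and
 * signals end-of-interrupt for the source encoded in the low 24 bits.
 * IPIs need no source-side handling beyond the CPPR update.
 */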
int xics_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 irq = xirr & 0x00ffffff;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/*
	 * ICP State: EOI
	 *
	 * Note: if EOI is incorrectly used by SW to lower the CPPR value
	 * (ie. make it more favored), we do not check for rejection of a
	 * pending interrupt; this is a SW error and PAPR specifies that
	 * we don't have to deal with it.
	 *
	 * The sending of an EOI to the ICS is handled after the CPPR
	 * update, which is shared with H_CPPR via icp_rm_down_cppr().
	 */
	icp_rm_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI */
	if (irq == XICS_IPI)
		return check_too_hard(xics, icp);

	return ics_rm_eoi(vcpu, irq);
}

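/*
 * EOI a passed-through interrupt on the real hardware: first let OPAL
 * clear the MSI state, then write the XIRR back to the XICS, either via
 * a direct real-mode store or through an OPAL call if there is no
 * real-mode mapping (in which case *again tells the caller whether the
 * EOI must be retried).
 */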
/* Records the last non-zero return from pnv_opal_pci_msi_eoi(), for debug */
unsigned long eoi_rc;

static void icp_eoi(struct irq_chip *c, u32 hwirq, __be32 xirr, bool *again)
{
	void __iomem *xics_phys;
	int64_t rc;

	rc = pnv_opal_pci_msi_eoi(c, hwirq);

	if (rc)
		eoi_rc = rc;

	iosync();

	/* EOI it */
	xics_phys = local_paca->kvm_hstate.xics_phys;
	if (xics_phys) {
		__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
	} else {
		rc = opal_int_eoi(be32_to_cpu(xirr));
		*again = rc > 0;
	}
}

static int xics_opal_set_server(unsigned int hw_irq, int server_cpu)
{
	unsigned int mangle_cpu = get_hard_smp_processor_id(server_cpu) << 2;

	return opal_set_xive(hw_irq, mangle_cpu, DEFAULT_PRIORITY);
}

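/*
 * Increment a per-CPU 32-bit counter from real mode.  The per-CPU address
 * may live in the vmalloc region, which cannot be dereferenced with the
 * MMU off, so it is converted with vmalloc_to_phys() first.
 */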
static inline void this_cpu_inc_rm(unsigned int __percpu *addr)
{
	unsigned long l;
	unsigned int *raddr;
	int cpu = smp_processor_id();

	raddr = per_cpu_ptr(addr, cpu);
	l = (unsigned long)raddr;

	if (REGION_ID(l) == VMALLOC_REGION_ID) {
		l = vmalloc_to_phys(raddr);
		raddr = (unsigned int *)l;
	}
	++*raddr;
}

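/*
 * Real-mode accounting for a passed-through interrupt: only the per-IRQ
 * and aggregate kstat counters are bumped here; the rest of the normal
 * host interrupt-handling path (irq_desc state updates, etc.) is not
 * replicated in real mode.
 */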
static void kvmppc_rm_handle_irq_desc(struct irq_desc *desc)
{
	this_cpu_inc_rm(desc->kstat_irqs);
	__this_cpu_inc(kstat.irqs_sum);
}

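/*
 * Deliver a passed-through host interrupt directly to the guest from real
 * mode: account it, latch the P/Q presented state, inject the mapped
 * virtual irq into the guest ICP if it was not already presented, and EOI
 * the hardware interrupt.  Returns 2 when the interrupt cannot be fully
 * handled here (no ICS, or further work is needed in virtual mode),
 * otherwise -2.
 */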
long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu,
				 __be32 xirr,
				 struct kvmppc_irq_map *irq_map,
				 struct kvmppc_passthru_irqmap *pimap,
				 bool *again)
{
	struct kvmppc_xics *xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u32 irq;
	u16 src;
	u32 pq_old, pq_new;

	irq = irq_map->v_hwirq;
	xics = vcpu->kvm->arch.xics;
	icp = vcpu->arch.icp;

	kvmppc_rm_handle_irq_desc(irq_map->desc);

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return 2;

	state = &ics->irq_state[src];

	/* Only MSIs register bypass devices, so it must be an MSI: set P */
	do {
		pq_old = state->pq_state;
		pq_new = ((pq_old << 1) & 3) | PQ_PRESENTED;
	} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);

	/* Test P=1, Q=0: the only state from which we deliver */
	if (pq_new == PQ_PRESENTED)
		icp_rm_deliver_irq(xics, icp, irq, false);

	/* EOI the interrupt */
	icp_eoi(irq_desc_get_chip(irq_map->desc), irq_map->r_hwirq, xirr,
		again);

	if (check_too_hard(xics, icp) == H_TOO_HARD)
		return 2;
	else
		return -2;
}

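/*
 * --- Host-side handlers for real-mode requests ---
 *
 * The functions below run in the host (virtual mode) on the core that
 * received a PPC_MSG_RM_HOST_ACTION IPI and carry out the action queued
 * by the real-mode code, currently just kicking a vcpu.
 */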
static void rm_host_ipi_action(int action, void *data)
{
	switch (action) {
	case XICS_RM_KICK_VCPU:
		kvmppc_host_rm_ops_hv->vcpu_kick(data);
		break;
	default:
		WARN(1, "Unexpected rm_action=%d data=%p\n", action, data);
		break;
	}
}

void kvmppc_xics_ipi_action(void)
{
	int core;
	unsigned int cpu = smp_processor_id();
	struct kvmppc_host_rm_core *rm_corep;

	core = cpu >> threads_shift;
	rm_corep = &kvmppc_host_rm_ops_hv->rm_core[core];

	if (rm_corep->rm_data) {
		rm_host_ipi_action(rm_corep->rm_state.rm_action,
				   rm_corep->rm_data);
		/* Order these stores against the real-mode side that set them */
		rm_corep->rm_data = NULL;
		smp_wmb();
		rm_corep->rm_state.rm_action = 0;
	}
}