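/*
 * Real-mode handling of the XICS interrupt controller hcalls
 * (H_XIRR, H_IPI, H_CPPR, H_EOI) for Book3S HV KVM, together with the
 * machinery used to redirect vcpu kicks and passed-through device
 * interrupts while running in guest real-mode context.
 */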
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/kernel_stat.h>

#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/synch.h>
#include <asm/cputhreads.h>
#include <asm/pgtable.h>
#include <asm/ppc-opcode.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>

#include "book3s_xics.h"

#define DEBUG_PASSUP

int h_ipi_redirect = 1;
EXPORT_SYMBOL(h_ipi_redirect);

static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
                               u32 new_irq, bool check_resend);
static int xics_opal_rm_set_server(unsigned int hw_irq, int server_cpu);

/* Walk one ICS and re-deliver any interrupts flagged for resend. */
static void ics_rm_check_resend(struct kvmppc_xics *xics,
                                struct kvmppc_ics *ics, struct kvmppc_icp *icp)
{
        int i;

        for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
                struct ics_irq_state *state = &ics->irq_state[i];
                if (state->resend)
                        icp_rm_deliver_irq(xics, icp, state->number, true);
        }
}

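/*
 * Post a real-mode host action to another host core: stash the target
 * vcpu in that core's rm_core slot and send it a muxed IPI so that the
 * core picks the action up in kvmppc_xics_ipi_action().
 */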
#ifdef CONFIG_SMP
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu)
{
        int hcpu;

        hcpu = hcore << threads_shift;
        kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu;
        smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION);
        icp_native_cause_ipi_rm(hcpu);
}
#else
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) { }
#endif
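/*
 * Scan host cores start + 1 .. max - 1 in the rm_core array for one that
 * is running in host context and has no real-mode action pending, and
 * claim it by atomically setting rm_action.
 *
 * Returns the core number claimed, or -1 if none was available.
 */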
static inline int grab_next_hostcore(int start,
                struct kvmppc_host_rm_core *rm_core, int max, int action)
{
        bool success;
        int core;
        union kvmppc_rm_state old, new;

        for (core = start + 1; core < max; core++) {
                old = new = READ_ONCE(rm_core[core].rm_state);

                /* Skip cores not in host context or with an action pending */
                if (!old.in_host || old.rm_action)
                        continue;

                /* Try to claim this core for our action */
                new.rm_action = action;

                success = cmpxchg64(&rm_core[core].rm_state.raw,
                                    old.raw, new.raw) == old.raw;
                if (success) {
                        /*
                         * Make sure the rm_action update is visible before
                         * the caller's subsequent store of rm_data and the
                         * IPI that follows, so the receiving core sees a
                         * consistent action/data pair.
                         */
                        smp_wmb();
                        return core;
                }
        }

        return -1;
}

static inline int find_available_hostcore(int action)
{
        int core;
        int my_core = smp_processor_id() >> threads_shift;
        struct kvmppc_host_rm_core *rm_core = kvmppc_host_rm_ops_hv->rm_core;

        core = grab_next_hostcore(my_core, rm_core, cpu_nr_cores(), action);
        if (core == -1)
                core = grab_next_hostcore(core, rm_core, my_core, action);

        return core;
}

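/*
 * Flag an external interrupt as pending for the target vcpu and kick it:
 * either by setting LPCR[MER] (when kicking ourselves), by IPI'ing the
 * physical CPU the vcpu is running on, or, if the vcpu is not currently
 * loaded, by redirecting the kick to another host core or falling back
 * to a host-side rm_action.
 */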
static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
                                struct kvm_vcpu *this_vcpu)
{
        struct kvmppc_icp *this_icp = this_vcpu->arch.icp;
        int cpu;
        int hcore;

        /* Mark the target VCPU as having an interrupt pending */
        vcpu->stat.queue_intr++;
        set_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);

        /* Kick self ? Just set MER and return */
        if (vcpu == this_vcpu) {
                mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_MER);
                return;
        }

        /*
         * Check if the core is loaded;
         * if not, find an available host core to post to wake the VCPU.
         * If we can't find one, set up state to eventually return too hard.
         */
        cpu = vcpu->arch.thread_cpu;
        if (cpu < 0 || cpu >= nr_cpu_ids) {
                hcore = -1;
                if (kvmppc_host_rm_ops_hv && h_ipi_redirect)
                        hcore = find_available_hostcore(XICS_RM_KICK_VCPU);
                if (hcore != -1) {
                        icp_send_hcore_msg(hcore, vcpu);
                } else {
                        this_icp->rm_action |= XICS_RM_KICK_VCPU;
                        this_icp->rm_kick_target = vcpu;
                }
                return;
        }

        /* Else poke the target with an IPI */
        smp_mb();
        kvmhv_rm_send_ipi(cpu);
}

static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
{
        /* Note: only ever called on the vcpu's own physical thread */
        clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
                  &vcpu->arch.pending_exceptions);
        mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_MER);
}

static inline bool icp_rm_try_update(struct kvmppc_icp *icp,
                                     union kvmppc_icp_state old,
                                     union kvmppc_icp_state new)
{
        struct kvm_vcpu *this_vcpu = local_paca->kvm_hstate.kvm_vcpu;
        bool success;

        /* Calculate new output value */
        new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

        /* Attempt atomic update */
        success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
        if (!success)
                goto bail;

        /*
         * Check for output state update.
         *
         * Note that this is racy since another processor could be updating
         * the state already.  This is why we never clear the interrupt
         * output here, we only ever set it.  The clear only happens prior
         * to doing an update and only by the processor itself, currently
         * in Accept (H_XIRR) and Up_CPPR (H_CPPR).
         *
         * We also do not try to figure out whether the EE state has
         * changed, we unconditionally set it if the new state calls for
         * it, since we opportunistically remove the pending interrupt
         * flag when raising CPPR and need to set it back here if an
         * interrupt is still pending.
         */
        if (new.out_ee)
                icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu);

        /* Expose the state change for debug purposes */
        this_vcpu->arch.icp->rm_dbgstate = new;
        this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu;

 bail:
        return success;
}

static inline int check_too_hard(struct kvmppc_xics *xics,
                                 struct kvmppc_icp *icp)
{
        return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS;
}

static void icp_rm_check_resend(struct kvmppc_xics *xics,
                                struct kvmppc_icp *icp)
{
        u32 icsid;

        /*
         * Order reading the resend_map before the per-source resend flags
         * (pairs with the smp_wmb() in icp_rm_deliver_irq()).
         */
        smp_rmb();
        for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
                struct kvmppc_ics *ics = xics->ics[icsid];

                if (!test_and_clear_bit(icsid, icp->resend_map))
                        continue;
                if (!ics)
                        continue;
                ics_rm_check_resend(xics, ics, icp);
        }
}

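/*
 * Attempt to present interrupt 'irq' at 'priority' to the ICP.  On
 * success, any interrupt that was previously in the XISR is returned in
 * *reject so the caller can re-deliver it; on failure, the ICP's
 * need_resend flag is set as part of the same atomic update.
 */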
static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
                                  u32 *reject)
{
        union kvmppc_icp_state old_state, new_state;
        bool success;

        do {
                old_state = new_state = READ_ONCE(icp->state);

                *reject = 0;

                /* See if we can deliver */
                success = new_state.cppr > priority &&
                        new_state.mfrr > priority &&
                        new_state.pending_pri > priority;

                /*
                 * If we can, check for a rejection and perform the
                 * delivery.
                 */
                if (success) {
                        *reject = new_state.xisr;
                        new_state.xisr = irq;
                        new_state.pending_pri = priority;
                } else {
                        /*
                         * If we failed to deliver we set need_resend
                         * so a subsequent CPPR state change causes us
                         * to try a new delivery.
                         */
                        new_state.need_resend = true;
                }

        } while (!icp_rm_try_update(icp, old_state, new_state));

        return success;
}

static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
                               u32 new_irq, bool check_resend)
{
        struct ics_irq_state *state;
        struct kvmppc_ics *ics;
        u32 reject;
        u16 src;

        /*
         * This is used both for initial delivery of an interrupt and
         * for subsequent rejection.
         *
         * Rejection can be racy vs. resends.  We have evaluated the
         * rejection in an atomic ICP transaction which is now complete,
         * so potentially the ICP can already accept the interrupt again.
         *
         * So we need to retry the delivery.  Essentially the reject path
         * boils down to a failed delivery.  The interrupt could also have
         * moved to a different target, so we may need to re-do the ICP
         * lookup as well.
         */
 again:
        /* Get the ICS state and lock it */
        ics = kvmppc_xics_find_ics(xics, new_irq, &src);
        if (!ics) {
                /* Unsafe increment, but this does not need to be accurate */
                xics->err_noics++;
                return;
        }
        state = &ics->irq_state[src];

        /* Get a lock on the ICS */
        arch_spin_lock(&ics->lock);

        /* Get our server */
        if (!icp || state->server != icp->server_num) {
                icp = kvmppc_xics_find_server(xics->kvm, state->server);
                if (!icp) {
                        /* Unsafe increment, as above */
                        xics->err_noicp++;
                        goto out;
                }
        }

        if (check_resend)
                if (!state->resend)
                        goto out;

        /* Clear the resend bit of that interrupt */
        state->resend = 0;

        /*
         * If masked, bail out.
         *
         * Note: PAPR doesn't mention anything about masked pending
         * when doing a resend, only when doing a delivery.  However,
         * making that distinction would lose a masked interrupt that
         * was rejected, which is inconsistent with the masked_pending
         * logic whose whole point is not to lose interrupts that occur
         * while masked.  So we treat deliveries and resends the same
         * and do not lose such interrupts.
         */
        if (state->priority == MASKED) {
                state->masked_pending = 1;
                goto out;
        }

        /*
         * Try the delivery; this will set the need_resend flag in the
         * ICP as part of the atomic transaction if the delivery is not
         * possible.
         *
         * Note that if successful, the new delivery might have itself
         * rejected an interrupt that was "delivered" before we took the
         * ICS spin lock.  In that case we do the whole sequence all over
         * again for the rejected interrupt: we cannot assume it is less
         * favored than the new one, because by the time we get here the
         * target processor may well have already consumed and completed
         * the new one, making the rejected interrupt acceptable again.
         */
        if (icp_rm_try_to_deliver(icp, new_irq, state->priority, &reject)) {
                /*
                 * Delivery was successful, did we reject somebody else ?
                 */
                if (reject && reject != XICS_IPI) {
                        arch_spin_unlock(&ics->lock);
                        icp->n_reject++;
                        new_irq = reject;
                        check_resend = 0;
                        goto again;
                }
        } else {
                /*
                 * We failed to deliver the interrupt, so set the resend
                 * map bit and mark the ICS state as needing a resend.
                 */
                state->resend = 1;

                /*
                 * Make sure the resend flag in the irq state is visible
                 * before we set the bit in the resend map (pairs with the
                 * smp_rmb() in icp_rm_check_resend()).
                 */
                smp_wmb();
                set_bit(ics->icsid, icp->resend_map);

                /*
                 * If the need_resend flag got cleared in the ICP some time
                 * between the icp_rm_try_to_deliver() atomic update and
                 * now, the resend scan may already have run and missed the
                 * bit we just set, so retry the delivery here rather than
                 * relying on a resend that may never come.
                 */
                smp_mb();
                if (!icp->state.need_resend) {
                        state->resend = 0;
                        arch_spin_unlock(&ics->lock);
                        check_resend = 0;
                        goto again;
                }
        }
 out:
        arch_spin_unlock(&ics->lock);
}

static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
                             u8 new_cppr)
{
        union kvmppc_icp_state old_state, new_state;
        bool resend;

        /*
         * This handles several related states in one operation:
         *
         * ICP State: Down_CPPR
         *
         * Load CPPR with the new value and if the XISR is 0
         * then check for resends:
         *
         * ICP State: Resend
         *
         * If MFRR is more favored than CPPR, check for IPIs
         * and notify the ICS of a potential resend.  This is done
         * asynchronously (when used in real mode, we will have
         * to exit here).
         *
         * We do not handle the complete Check_IPI as documented
         * in PAPR, because checking for a pending IPI is simply
         * a decision whether to set the XISR to the IPI value,
         * which we do if MFRR is more favored than the new CPPR
         * and the pending priority.
         */
        do {
                old_state = new_state = READ_ONCE(icp->state);

                /* Down_CPPR */
                new_state.cppr = new_cppr;

                /*
                 * Cut down Resend / Check_IPI / IPI
                 *
                 * The logic is that we cannot have a pending interrupt
                 * trumped by an IPI at this point (see above), so we
                 * know that either the pending interrupt is already an
                 * IPI (in which case we don't care to override it) or
                 * it's either more favored than us or non existent.
                 */
                if (new_state.mfrr < new_cppr &&
                    new_state.mfrr <= new_state.pending_pri) {
                        new_state.pending_pri = new_state.mfrr;
                        new_state.xisr = XICS_IPI;
                }

                /* Latch and clear the resend bit */
                resend = new_state.need_resend;
                new_state.need_resend = 0;

        } while (!icp_rm_try_update(icp, old_state, new_state));

        /*
         * Now handle resend checks.  Those are asynchronous to the ICP
         * state update in HW (ie bus transactions) so we can handle them
         * separately here as well.
         */
        if (resend) {
                icp->n_check_resend++;
                icp_rm_check_resend(xics, icp);
        }
}

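/*
 * Real-mode handler for the H_XIRR hcall: accept the highest priority
 * pending interrupt, returning the XIRR (XISR | CPPR << 24) in GPR4.
 */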
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
        union kvmppc_icp_state old_state, new_state;
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        struct kvmppc_icp *icp = vcpu->arch.icp;
        u32 xirr;

        if (!xics || !xics->real_mode)
                return H_TOO_HARD;

        /* First, remove EE from the processor */
        icp_rm_clr_vcpu_irq(icp->vcpu);

        /*
         * ICP State: Accept_Interrupt
         *
         * Return the pending interrupt (if any) along with the
         * current CPPR, set the CPPR to the pending priority
         * and clear the XISR.
         */
        do {
                old_state = new_state = READ_ONCE(icp->state);

                xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
                if (!old_state.xisr)
                        break;
                new_state.cppr = new_state.pending_pri;
                new_state.pending_pri = 0xff;
                new_state.xisr = 0;

        } while (!icp_rm_try_update(icp, old_state, new_state));

        /* Return the result in GPR4 */
        vcpu->arch.gpr[4] = xirr;

        return check_too_hard(xics, icp);
}

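/*
 * Real-mode handler for the H_IPI hcall: update the MFRR of the target
 * server's ICP, rejecting or resending interrupts as needed.
 */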
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
                    unsigned long mfrr)
{
        union kvmppc_icp_state old_state, new_state;
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        struct kvmppc_icp *icp, *this_icp = vcpu->arch.icp;
        u32 reject;
        bool resend;
        bool local;

        if (!xics || !xics->real_mode)
                return H_TOO_HARD;

        local = this_icp->server_num == server;
        if (local)
                icp = this_icp;
        else
                icp = kvmppc_xics_find_server(vcpu->kvm, server);
        if (!icp)
                return H_PARAMETER;

        /*
         * ICP state: Set_MFRR
         *
         * If the CPPR is more favored than the new MFRR, then
         * nothing needs to be done as there can be no XISR to
         * reject.
         *
         * ICP state: Check_IPI
         *
         * If the CPPR is less favored, then we might be replacing
         * an interrupt, and thus need to possibly reject it.
         *
         * ICP State: IPI
         *
         * Besides rejecting any pending interrupts, we also
         * update XISR and pending_pri to mark the IPI as pending.
         *
         * PAPR does not describe this state, but if the MFRR is being
         * made less favored than its earlier value, there might be
         * a previously-rejected interrupt needing to be resent.
         * Ideally we would want to resend only if
         *      prio(pending_interrupt) < mfrr &&
         *      prio(pending_interrupt) < cppr
         * where pending_interrupt is the one that was rejected, but
         * we don't have that state, so we simply trigger a resend
         * whenever the MFRR is made less favored.
         */
        do {
                old_state = new_state = READ_ONCE(icp->state);

                /* Set_MFRR */
                new_state.mfrr = mfrr;

                /* Check_IPI */
                reject = 0;
                resend = false;
                if (mfrr < new_state.cppr) {
                        /* Reject a pending interrupt if not an IPI */
                        if (mfrr <= new_state.pending_pri) {
                                reject = new_state.xisr;
                                new_state.pending_pri = mfrr;
                                new_state.xisr = XICS_IPI;
                        }
                }

                if (mfrr > old_state.mfrr) {
                        resend = new_state.need_resend;
                        new_state.need_resend = 0;
                }
        } while (!icp_rm_try_update(icp, old_state, new_state));

        /* Handle reject in real mode */
        if (reject && reject != XICS_IPI) {
                this_icp->n_reject++;
                icp_rm_deliver_irq(xics, icp, reject, false);
        }

        /* Handle resends in real mode */
        if (resend) {
                this_icp->n_check_resend++;
                icp_rm_check_resend(xics, icp);
        }

        return check_too_hard(xics, this_icp);
}

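/*
 * Real-mode handler for the H_CPPR hcall: change the processor's current
 * priority, rejecting the pending interrupt if it is no longer favored
 * enough to be presented.
 */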
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
        union kvmppc_icp_state old_state, new_state;
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        struct kvmppc_icp *icp = vcpu->arch.icp;
        u32 reject;

        if (!xics || !xics->real_mode)
                return H_TOO_HARD;

        /*
         * ICP State: Set_CPPR
         *
         * We can safely compare the new value with the current
         * value outside of the transaction as the CPPR is only
         * ever changed by the processor on itself.
         */
        if (cppr > icp->state.cppr) {
                icp_rm_down_cppr(xics, icp, cppr);
                goto bail;
        } else if (cppr == icp->state.cppr)
                return H_SUCCESS;

        /*
         * ICP State: Up_CPPR
         *
         * The processor is raising its priority, this can result
         * in a rejection of a pending interrupt:
         *
         * ICP State: Reject_Current
         *
         * We can remove EE from the current processor, the update
         * transaction will set it again if needed.
         */
        icp_rm_clr_vcpu_irq(icp->vcpu);

        do {
                old_state = new_state = READ_ONCE(icp->state);

                reject = 0;
                new_state.cppr = cppr;

                if (cppr <= new_state.pending_pri) {
                        reject = new_state.xisr;
                        new_state.xisr = 0;
                        new_state.pending_pri = 0xff;
                }

        } while (!icp_rm_try_update(icp, old_state, new_state));

        /*
         * Check for rejects.  They are handled by doing a new delivery
         * attempt (see comments in icp_rm_deliver_irq).
         */
        if (reject && reject != XICS_IPI) {
                icp->n_reject++;
                icp_rm_deliver_irq(xics, icp, reject, false);
        }
 bail:
        return check_too_hard(xics, icp);
}

static int ics_rm_eoi(struct kvm_vcpu *vcpu, u32 irq)
{
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        struct kvmppc_icp *icp = vcpu->arch.icp;
        struct kvmppc_ics *ics;
        struct ics_irq_state *state;
        u16 src;
        u32 pq_old, pq_new;

        /*
         * ICS EOI handling: For LSI, if the P bit is still set, we need to
         * resend it.
         *
         * For MSI, we move the Q bit into P (and clear Q).  If it is set,
         * resend it.
         */
        ics = kvmppc_xics_find_ics(xics, irq, &src);
        if (!ics)
                goto bail;

        state = &ics->irq_state[src];

        if (state->lsi)
                pq_new = state->pq_state;
        else
                do {
                        pq_old = state->pq_state;
                        pq_new = pq_old >> 1;
                } while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);

        if (pq_new & PQ_PRESENTED)
                icp_rm_deliver_irq(xics, NULL, irq, false);

        if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
                icp->rm_action |= XICS_RM_NOTIFY_EOI;
                icp->rm_eoied_irq = irq;
        }

        /*
         * For passed-through interrupts, move the interrupt's hardware
         * affinity to the core this vcpu last ran on, if it has changed.
         */
        if (state->host_irq) {
                ++vcpu->stat.pthru_all;
                if (state->intr_cpu != -1) {
                        int pcpu = raw_smp_processor_id();

                        pcpu = cpu_first_thread_sibling(pcpu);
                        ++vcpu->stat.pthru_host;
                        if (state->intr_cpu != pcpu) {
                                ++vcpu->stat.pthru_bad_aff;
                                xics_opal_rm_set_server(state->host_irq, pcpu);
                        }
                        state->intr_cpu = -1;
                }
        }

 bail:
        return check_too_hard(xics, icp);
}

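/*
 * Real-mode handler for the H_EOI hcall: lower the CPPR as encoded in
 * the XIRR and, unless the source is the IPI, perform the ICS-side EOI.
 */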
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        struct kvmppc_icp *icp = vcpu->arch.icp;
        u32 irq = xirr & 0x00ffffff;

        if (!xics || !xics->real_mode)
                return H_TOO_HARD;

        /*
         * ICP State: EOI
         *
         * Note: If EOI is incorrectly used by SW to lower the CPPR
         * value (ie make it more favored), we do not check for rejection
         * of a pending interrupt; this is a SW error and PAPR specifies
         * that we don't have to deal with it.
         *
         * The sending of an EOI to the ICS is handled after the
         * CPPR update.
         *
         * ICP State: Down_CPPR, which we handle in a separate function
         * as it is shared with H_CPPR.
         */
        icp_rm_down_cppr(xics, icp, xirr >> 24);

        /* IPIs have no EOI */
        if (irq == XICS_IPI)
                return check_too_hard(xics, icp);

        return ics_rm_eoi(vcpu, irq);
}

unsigned long eoi_rc;

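/*
 * EOI a passed-through interrupt from real mode: complete the MSI EOI
 * via OPAL, then write the XIRR back to the real ICP.
 */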
static void icp_eoi(struct irq_chip *c, u32 hwirq, u32 xirr)
{
        unsigned long xics_phys;
        int64_t rc;

        rc = pnv_opal_pci_msi_eoi(c, hwirq);

        if (rc)
                eoi_rc = rc;

        iosync();

        /* EOI it */
        xics_phys = local_paca->kvm_hstate.xics_phys;
        _stwcix(xics_phys + XICS_XIRR, xirr);
}

static int xics_opal_rm_set_server(unsigned int hw_irq, int server_cpu)
{
        unsigned int mangle_cpu = get_hard_smp_processor_id(server_cpu) << 2;

        return opal_rm_set_xive(hw_irq, mangle_cpu, DEFAULT_PRIORITY);
}
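/*
 * Increment a per-CPU 32-bit unsigned integer variable in real mode.
 * If the per-CPU address is in the vmalloc region, convert it to its
 * real (physical) address first, since we run with the MMU off here.
 */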
static inline void this_cpu_inc_rm(unsigned int __percpu *addr)
{
        unsigned long l;
        unsigned int *raddr;
        int cpu = smp_processor_id();

        raddr = per_cpu_ptr(addr, cpu);
        l = (unsigned long)raddr;

        if (REGION_ID(l) == VMALLOC_REGION_ID) {
                l = vmalloc_to_phys(raddr);
                raddr = (unsigned int *)l;
        }
        ++*raddr;
}
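/*
 * Account a passed-through interrupt handled in real mode: bump the
 * per-CPU count for this irq_desc and the per-CPU interrupt summary.
 * The irq_desc state flags are deliberately left untouched here.
 */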
static void kvmppc_rm_handle_irq_desc(struct irq_desc *desc)
{
        this_cpu_inc_rm(desc->kstat_irqs);
        __this_cpu_inc(kstat.irqs_sum);
}

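/*
 * Attempt to deliver a host interrupt that maps onto a passed-through
 * guest interrupt, entirely in real mode.
 *
 * Returns 2 if the interrupt still needs attention from the host
 * (no ICS found, or the ICP has host work pending), or -2 if it was
 * fully handled here.
 */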
long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu,
                                 u32 xirr,
                                 struct kvmppc_irq_map *irq_map,
                                 struct kvmppc_passthru_irqmap *pimap)
{
        struct kvmppc_xics *xics;
        struct kvmppc_icp *icp;
        struct kvmppc_ics *ics;
        struct ics_irq_state *state;
        u32 irq;
        u16 src;
        u32 pq_old, pq_new;

        irq = irq_map->v_hwirq;
        xics = vcpu->kvm->arch.xics;
        icp = vcpu->arch.icp;

        kvmppc_rm_handle_irq_desc(irq_map->desc);

        ics = kvmppc_xics_find_ics(xics, irq, &src);
        if (!ics)
                return 2;

        state = &ics->irq_state[src];

        /* Shift the previous P bit into Q and mark the interrupt presented */
        do {
                pq_old = state->pq_state;
                pq_new = ((pq_old << 1) & 3) | PQ_PRESENTED;
        } while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);

        /* Test P=1, Q=0: this is the only case where we present */
        if (pq_new == PQ_PRESENTED)
                icp_rm_deliver_irq(xics, icp, irq, false);

        /* EOI the interrupt */
        icp_eoi(irq_desc_get_chip(irq_map->desc), irq_map->r_hwirq, xirr);

        if (check_too_hard(xics, icp) == H_TOO_HARD)
                return 2;
        else
                return -2;
}
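/*
 * Perform a host action that was posted to this core from real mode.
 * Currently the only such action is kicking a vcpu (XICS_RM_KICK_VCPU).
 */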
static void rm_host_ipi_action(int action, void *data)
{
        switch (action) {
        case XICS_RM_KICK_VCPU:
                kvmppc_host_rm_ops_hv->vcpu_kick(data);
                break;
        default:
                WARN(1, "Unexpected rm_action=%d data=%p\n", action, data);
                break;
        }
}

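/*
 * Host-side handler for the PPC_MSG_RM_HOST_ACTION muxed IPI: pick up
 * the action and data that real-mode code posted for this core and
 * perform it, then release the slot.
 */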
void kvmppc_xics_ipi_action(void)
{
        int core;
        unsigned int cpu = smp_processor_id();
        struct kvmppc_host_rm_core *rm_corep;

        core = cpu >> threads_shift;
        rm_corep = &kvmppc_host_rm_ops_hv->rm_core[core];

        if (rm_corep->rm_data) {
                rm_host_ipi_action(rm_corep->rm_state.rm_action,
                                   rm_corep->rm_data);
                /* Order these stores against the real mode KVM code */
                rm_corep->rm_data = NULL;
                smp_wmb();
                rm_corep->rm_state.rm_action = 0;
        }
}