/*
 * Copyright 2016,2017 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/msi.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/xmon.h>

#include "xive-internal.h"

#undef DEBUG_FLUSH
#undef DEBUG_ALL

#ifdef DEBUG_ALL
#define DBG_VERBOSE(fmt, ...)	pr_devel("cpu %d - " fmt, \
					 smp_processor_id(), ## __VA_ARGS__)
#else
#define DBG_VERBOSE(fmt...)	do { } while(0)
#endif

bool __xive_enabled;
EXPORT_SYMBOL_GPL(__xive_enabled);
bool xive_cmdline_disabled;

/* We use only one priority for now (set from max_prio at init time) */
static u8 xive_irq_priority;

/* TIMA MMIO area, exported for KVM use */
void __iomem *xive_tima;
EXPORT_SYMBOL_GPL(xive_tima);
u32 xive_tima_offset;

/* Backend ops */
static const struct xive_ops *xive_ops;

/* Our global interrupt domain */
static struct irq_domain *xive_irq_domain;

#ifdef CONFIG_SMP
/* The IPI HW IRQ number */
static u32 xive_ipi_irq;
#endif

/* Xive state for each CPU */
static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);

/*
 * A "disabled" interrupt should never fire, to catch problems
 * we set its logical number to this
 */
#define XIVE_BAD_IRQ		0x7fffffff
#define XIVE_MAX_IRQ		(XIVE_BAD_IRQ - 1)

/* An invalid CPU target */
#define XIVE_INVALID_TARGET	(-1)

/*
 * Read the next entry in a queue, return its content if it's valid
 * or 0 if there is no new entry.
 *
 * The queue pointer is moved forward unless "just_peek" is set.
 */
static u32 xive_read_eq(struct xive_q *q, bool just_peek)
{
        u32 cur;

        if (!q->qpage)
                return 0;
        cur = be32_to_cpup(q->qpage + q->idx);

        /* Check valid bit (31) vs current toggle polarity */
        if ((cur >> 31) == q->toggle)
                return 0;

        /* If consuming from the queue ... */
        if (!just_peek) {
                /* Next entry */
                q->idx = (q->idx + 1) & q->msk;

                /* Wrap around: flip valid toggle */
                if (q->idx == 0)
                        q->toggle ^= 1;
        }
        /* Mask out the valid bit (31) */
        return cur & 0x7fffffff;
}

/*
 * Scans all the queues that may have interrupts in them
 * (based on "pending_prio") in priority order until an
 * interrupt is found or all the queues are empty.
 *
 * Then updates the CPPR (Current Processor Priority
 * Register) based on the most favored interrupt found
 * (0xff if none) and returns the fetched interrupt number
 * (0 if none was found).
 *
 * If just_peek is set, return the most favored pending
 * interrupt if any but don't update the queue pointers.
 *
 * Note: this can operate generically on any number of queues
 * (up to 8) but the current implementation of the driver only
 * uses a single queue.
 *
 * Note2: this also "flushes" "pending_count" of a queue into
 * "count" when that queue is observed to be empty. This is how
 * we account interrupts targetting a queue: when an interrupt
 * is moved away, we only decrement the queue count once the
 * queue has been observed empty, to avoid races.
 */
static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
{
        u32 irq = 0;
        u8 prio;

        /* Find highest pending priority */
        while (xc->pending_prio != 0) {
                struct xive_q *q;

                prio = ffs(xc->pending_prio) - 1;
                DBG_VERBOSE("scan_irq: trying prio %d\n", prio);

                /* Try to fetch from the queue for that priority */
                irq = xive_read_eq(&xc->queue[prio], just_peek);

                /* Found something ? That's it */
                if (irq)
                        break;

                /* Queue is empty, clear its pending bit */
                xc->pending_prio &= ~(1 << prio);

                /*
                 * The queue is empty: flush any deferred "pending_count"
                 * removals into the queue count (see the comment above
                 * xive_dec_target_count()).
                 */
                q = &xc->queue[prio];
                if (atomic_read(&q->pending_count)) {
                        int p = atomic_xchg(&q->pending_count, 0);
                        if (p) {
                                WARN_ON(p > atomic_read(&q->count));
                                atomic_sub(p, &q->count);
                        }
                }
        }

        /* If nothing was found, set CPPR to 0xff (lowest priority) */
        if (irq == 0)
                prio = 0xff;

        /* Update the HW CPPR if it changed */
        if (prio != xc->cppr) {
                DBG_VERBOSE("scan_irq: adjusting CPPR to %d\n", prio);
                xc->cppr = prio;
                out_8(xive_tima + xive_tima_offset + TM_CPPR, prio);
        }

        return irq;
}

/*
 * Perform one of the special ESB MMIO loads used to read or
 * manipulate the PQ bits of an interrupt source.
 */
static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
{
        u64 val;

        /* Handle HW errata */
        if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
                offset |= offset << 4;

        if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
                val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0);
        else
                val = in_be64(xd->eoi_mmio + offset);

        return (u8)val;
}

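/*
 * Perform one of the special ESB MMIO stores (e.g. "store EOI" or a
 * trigger), honouring the same HW errata / firmware indirection as
 * xive_esb_read().
 */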
static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data)
{
        /* Handle HW errata */
        if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
                offset |= offset << 4;

        if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
                xive_ops->esb_rw(xd->hw_irq, offset, data, 1);
        else
                out_be64(xd->eoi_mmio + offset, data);
}

#ifdef CONFIG_XMON
static notrace void xive_dump_eq(const char *name, struct xive_q *q)
{
        u32 i0, i1, idx;

        if (!q->qpage)
                return;
        idx = q->idx;
        i0 = be32_to_cpup(q->qpage + idx);
        idx = (idx + 1) & q->msk;
        i1 = be32_to_cpup(q->qpage + idx);
        xmon_printf(" %s Q T=%d %08x %08x ...\n", name,
                    q->toggle, i0, i1);
}

notrace void xmon_xive_do_dump(int cpu)
{
        struct xive_cpu *xc = per_cpu(xive_cpu, cpu);

        xmon_printf("XIVE state for CPU %d:\n", cpu);
        xmon_printf(" pp=%02x cppr=%02x\n", xc->pending_prio, xc->cppr);
        xive_dump_eq("IRQ", &xc->queue[xive_irq_priority]);
#ifdef CONFIG_SMP
        {
                u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);
                xmon_printf(" IPI state: %x:%c%c\n", xc->hw_ipi,
                            val & XIVE_ESB_VAL_P ? 'P' : 'p',
                            val & XIVE_ESB_VAL_Q ? 'Q' : 'q');
        }
#endif
}
#endif /* CONFIG_XMON */

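/*
 * Fetch the next pending interrupt for this CPU. This is wired up
 * as the ppc_md.get_irq() hook in xive_core_init().
 */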
static unsigned int xive_get_irq(void)
{
        struct xive_cpu *xc = __this_cpu_read(xive_cpu);
        u32 irq;

        /*
         * This can be called either as a result of a HW interrupt or
         * as a "replay" because EOI decided there was still something
         * in one of the queues.
         *
         * First we perform an ACK cycle in order to update our mask
         * of pending priorities. This will also have the effect of
         * updating the CPPR to the most favored pending interrupt.
         */
        xive_ops->update_pending(xc);

        DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio);

        /* Scan our queue(s) for interrupts */
        irq = xive_scan_interrupts(xc, false);

        DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n",
                    irq, xc->pending_prio);

        /* Return pending interrupt if any */
        if (irq == XIVE_BAD_IRQ)
                return 0;
        return irq;
}

/*
 * After EOI'ing an interrupt, we need to re-check the queue
 * to see if another interrupt is pending since multiple
 * interrupts can coalesce into a single notification to the
 * CPU.
 *
 * If we find that there is indeed more in there, we call
 * force_external_irq_replay() to make Linux synthesize an
 * external interrupt on the next call to local_irq_restore().
 */
static void xive_do_queue_eoi(struct xive_cpu *xc)
{
        if (xive_scan_interrupts(xc, true) != 0) {
                DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio);
                force_external_irq_replay();
        }
}

/*
 * EOI an interrupt at the source. There are several methods
 * to do this depending on the HW version and source type.
 */
static void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
{
        /* If the XIVE supports the new "store EOI" facility, use it */
        if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
                xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
        else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
                /*
                 * The firmware told us to call it for the EOI. This
                 * happens for some interrupt sources that need extra
                 * HW work beyond the ESB manipulation, which the
                 * firmware takes care of.
                 */
                if (WARN_ON_ONCE(!xive_ops->eoi))
                        return;
                xive_ops->eoi(hw_irq);
        } else {
                u8 eoi_val;

                /*
                 * Otherwise, for MSIs, EOI by using the special MMIO
                 * load that clears both P and Q and returns the old Q,
                 * which lets us re-trigger here if Q was set rather
                 * than synthesizing an interrupt in software.
                 *
                 * For LSIs, use the "EOI cycle" special load instead;
                 * the HW will resend the interrupt if the line is
                 * still asserted.
                 */
                if (xd->flags & XIVE_IRQ_FLAG_LSI)
                        xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
                else {
                        eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
                        DBG_VERBOSE("eoi_val=%x\n", eoi_val);

                        /* Re-trigger if needed */
                        if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
                                out_be64(xd->trig_mmio, 0);
                }
        }
}

/* irq_chip eoi callback */
static void xive_irq_eoi(struct irq_data *d)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
        struct xive_cpu *xc = __this_cpu_read(xive_cpu);

        DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
                    d->irq, irqd_to_hwirq(d), xc->pending_prio);

        /*
         * EOI the source if it hasn't been disabled and hasn't
         * been passed-through to a KVM guest
         */
        if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d) &&
            !(xd->flags & XIVE_IRQ_NO_EOI))
                xive_do_source_eoi(irqd_to_hwirq(d), xd);

        /*
         * Clear saved_p to indicate that it's no longer occupying
         * a queue slot on the target queue
         */
        xd->saved_p = false;

        /* Check for more work in the queue */
        xive_do_queue_eoi(xc);
}

/*
 * Helper used to mask and unmask an interrupt source. This
 * is only called for normal interrupts that do not require
 * masking/unmasking via firmware.
 */
static void xive_do_source_set_mask(struct xive_irq_data *xd,
                                    bool mask)
{
        u64 val;

        /*
         * If the interrupt had P set while being masked, it is (or was)
         * occupying a queue slot, so it must not be fully re-enabled
         * until it has been fetched and EOId. We remember that in
         * saved_p and restore PQ=10 (rather than PQ=00) on unmask in
         * that case.
         */
        if (mask) {
                val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
                xd->saved_p = !!(val & XIVE_ESB_VAL_P);
        } else if (xd->saved_p)
                xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
        else
                xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
}

/*
 * Try to choose "cpu" as a new interrupt target. Increments
 * the queue accounting for that target if it's not already
 * full.
 */
static bool xive_try_pick_target(int cpu)
{
        struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
        struct xive_q *q = &xc->queue[xive_irq_priority];
        int max;

        /*
         * Calculate max number of interrupts in that queue.
         *
         * We leave a gap of 1 just in case...
         */
        max = (q->msk + 1) - 1;
        return !!atomic_add_unless(&q->count, 1, max);
}

/*
 * Un-account an interrupt for a target CPU. We don't directly
 * decrement q->count since the interrupt might still be present
 * in the queue.
 *
 * Instead increment a separate counter "pending_count" which
 * will be subtracted from "count" when the queue is observed
 * to be empty.
 */
static void xive_dec_target_count(int cpu)
{
        struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
        struct xive_q *q;

        if (WARN_ON(cpu < 0 || !xc)) {
                pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc);
                return;
        }
        q = &xc->queue[xive_irq_priority];

        /*
         * Increment the "pending count", which will be used to
         * decrement the target queue count the next time that queue
         * is scanned and found empty. This ensures we don't decrement
         * while the interrupt may still be occupying a slot.
         */
        atomic_inc(&q->pending_count);
}

/* Find a tentative CPU target in a CPU mask */
static int xive_find_target_in_mask(const struct cpumask *mask,
                                    unsigned int fuzz)
{
        int cpu, first, num, i;

        /* Pick a starting point in the mask based on fuzz */
        num = min_t(int, cpumask_weight(mask), nr_cpu_ids);
        first = fuzz % num;

        /* Locate it */
        cpu = cpumask_first(mask);
        for (i = 0; i < first && cpu < nr_cpu_ids; i++)
                cpu = cpumask_next(cpu, mask);

        /* Sanity check */
        if (WARN_ON(cpu >= nr_cpu_ids))
                cpu = cpumask_first(cpu_online_mask);

        /* Remember the first one to check for wrap-around */
        first = cpu;

        /*
         * Now go through the entire mask until we find a valid
         * target.
         */
        for (;;) {
                /*
                 * We re-check online as the fallback case passes us
                 * an untested affinity mask
                 */
                if (cpu_online(cpu) && xive_try_pick_target(cpu))
                        return cpu;
                cpu = cpumask_next(cpu, mask);
                if (cpu == first)
                        break;
                /* Wrap around */
                if (cpu >= nr_cpu_ids)
                        cpu = cpumask_first(mask);
        }
        return -1;
}

/*
 * Pick a target CPU for an interrupt. This is done at
 * startup or if the affinity is changed in a way that
 * invalidates the current target.
 */
static int xive_pick_irq_target(struct irq_data *d,
                                const struct cpumask *affinity)
{
        static unsigned int fuzz;
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
        cpumask_var_t mask;
        int cpu = -1;

        /*
         * If we have chip IDs, first try to build a mask of the CPUs
         * on the same chip as the source and find a target in there.
         */
        if (xd->src_chip != XIVE_INVALID_CHIP_ID &&
            zalloc_cpumask_var(&mask, GFP_ATOMIC)) {
                /* Build a mask of matching chip IDs */
                for_each_cpu_and(cpu, affinity, cpu_online_mask) {
                        struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
                        if (xc->chip_id == xd->src_chip)
                                cpumask_set_cpu(cpu, mask);
                }
                /* Try to find a target in that mask */
                if (cpumask_empty(mask))
                        cpu = -1;
                else
                        cpu = xive_find_target_in_mask(mask, fuzz++);
                free_cpumask_var(mask);
                if (cpu >= 0)
                        return cpu;
                fuzz--;
        }

        /* No matching chip found, fall back to the full affinity mask */
        return xive_find_target_in_mask(affinity, fuzz++);
}

static unsigned int xive_irq_startup(struct irq_data *d)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
        unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
        int target, rc;

        pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n",
                 d->irq, hw_irq, d);

#ifdef CONFIG_PCI_MSI
        /*
         * The generic MSI code returns with the interrupt disabled on the
         * card, using the MSI mask bits. Firmware doesn't appear to unmask
         * at that level, so we do it here by hand.
         */
        if (irq_data_get_msi_desc(d))
                pci_msi_unmask_irq(d);
#endif

        /* Pick a target */
        target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d));
        if (target == XIVE_INVALID_TARGET) {
                /* Didn't find one, try again ignoring the affinity */
                target = xive_pick_irq_target(d, cpu_online_mask);
                if (target == XIVE_INVALID_TARGET)
                        return -ENXIO;
                pr_warn("irq %d started with broken affinity\n", d->irq);
        }

        /* Sanity check */
        if (WARN_ON(target == XIVE_INVALID_TARGET ||
                    target >= nr_cpu_ids))
                target = smp_processor_id();

        xd->target = target;

        /*
         * Configure the logical number to be the Linux IRQ number
         * and set the target queue
         */
        rc = xive_ops->configure_irq(hw_irq,
                                     get_hard_smp_processor_id(target),
                                     xive_irq_priority, d->irq);
        if (rc)
                return rc;

        /* Unmask the ESB */
        xive_do_source_set_mask(xd, false);

        return 0;
}

static void xive_irq_shutdown(struct irq_data *d)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
        unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

        pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n",
                 d->irq, hw_irq, d);

        if (WARN_ON(xd->target == XIVE_INVALID_TARGET))
                return;

        /* Mask the interrupt at the source */
        xive_do_source_set_mask(xd, true);

        /*
         * The above may have set saved_p. We clear it otherwise it
         * will prevent re-enabling later on. It is ok to forget the
         * fact that the interrupt might be in a queue because we are
         * accounting that already in xive_dec_target_count() and will
         * be re-routing it to a new queue with proper accounting when
         * it's started up again.
         */
        xd->saved_p = false;

        /*
         * Mask the interrupt in HW in the IVT/EAS and set the number
         * to be the "bad" IRQ number
         */
        xive_ops->configure_irq(hw_irq,
                                get_hard_smp_processor_id(xd->target),
                                0xff, XIVE_BAD_IRQ);

        xive_dec_target_count(xd->target);
        xd->target = XIVE_INVALID_TARGET;
}

static void xive_irq_unmask(struct irq_data *d)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

        pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd);

        /*
         * Some sources are flagged as needing firmware to mask and
         * unmask them (rather than manipulating the ESB directly).
         * For those, re-configure the EAS with the normal priority
         * to unmask.
         */
        if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
                unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
                xive_ops->configure_irq(hw_irq,
                                        get_hard_smp_processor_id(xd->target),
                                        xive_irq_priority, d->irq);
                return;
        }

        xive_do_source_set_mask(xd, false);
}

static void xive_irq_mask(struct irq_data *d)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

        pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd);

        /*
         * Sources flagged XIVE_IRQ_FLAG_MASK_FW are masked via
         * firmware, by re-configuring the EAS with priority 0xff
         * instead of manipulating the ESB.
         */
        if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
                unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
                xive_ops->configure_irq(hw_irq,
                                        get_hard_smp_processor_id(xd->target),
                                        0xff, d->irq);
                return;
        }

        xive_do_source_set_mask(xd, true);
}

static int xive_irq_set_affinity(struct irq_data *d,
                                 const struct cpumask *cpumask,
                                 bool force)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
        unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
        u32 target, old_target;
        int rc = 0;

        pr_devel("xive_irq_set_affinity: irq %d\n", d->irq);

        /* Is this valid ? */
        if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
                return -EINVAL;

        /* Don't do anything if the interrupt isn't started */
        if (!irqd_is_started(d))
                return IRQ_SET_MASK_OK;

        /*
         * If the existing target is already in the new mask and is
         * online then do nothing.
         */
        if (xd->target != XIVE_INVALID_TARGET &&
            cpu_online(xd->target) &&
            cpumask_test_cpu(xd->target, cpumask))
                return IRQ_SET_MASK_OK;

        /* Pick a new target */
        target = xive_pick_irq_target(d, cpumask);

        /* No target found */
        if (target == XIVE_INVALID_TARGET)
                return -ENXIO;

        /* Sanity check */
        if (WARN_ON(target >= nr_cpu_ids))
                target = smp_processor_id();

        old_target = xd->target;

        /*
         * Only configure the irq if it's not currently passed-through to
         * a KVM guest
         */
        if (!irqd_is_forwarded_to_vcpu(d))
                rc = xive_ops->configure_irq(hw_irq,
                                             get_hard_smp_processor_id(target),
                                             xive_irq_priority, d->irq);
        if (rc < 0) {
                pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
                return rc;
        }

        pr_devel("  target: 0x%x\n", target);
        xd->target = target;

        /* Give up the previous target */
        if (old_target != XIVE_INVALID_TARGET)
                xive_dec_target_count(old_target);

        return IRQ_SET_MASK_OK;
}

static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

        /*
         * We only support edge-rising and level-low. Setting the type
         * has no effect on the HW, it only updates the descriptor bits,
         * which in turn affect how re-sends are handled when an edge
         * interrupt is re-enabled.
         *
         * Default to edge as explained in the map() callback.
         */
        if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
                flow_type = IRQ_TYPE_EDGE_RISING;

        if (flow_type != IRQ_TYPE_EDGE_RISING &&
            flow_type != IRQ_TYPE_LEVEL_LOW)
                return -EINVAL;

        irqd_set_trigger_type(d, flow_type);

        /*
         * Double check that the requested type matches what the
         * firmware told us about the source (LSI vs MSI).
         */
        if ((flow_type == IRQ_TYPE_LEVEL_LOW) !=
            !!(xd->flags & XIVE_IRQ_FLAG_LSI)) {
                pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n",
                        d->irq, (u32)irqd_to_hwirq(d),
                        (flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge",
                        (xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge");
        }

        return IRQ_SET_MASK_OK_NOCOPY;
}

static int xive_irq_retrigger(struct irq_data *d)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

        /* This should only be used for MSIs */
        if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
                return 0;

        /*
         * To perform a retrigger, we first set the PQ bits to
         * 11, then perform an EOI.
         */
        xive_esb_read(xd, XIVE_ESB_SET_PQ_11);

        /*
         * Note: We pass "0" to the hw_irq argument in order to
         * avoid calling into the backend EOI code which we don't
         * want to do in the case of a re-trigger. Backend EOI code
         * is only required for LSIs anyway.
         */
        xive_do_source_eoi(0, xd);

        return 1;
}

static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
        unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
        int rc;
        u8 pq;

        /*
         * We only support this on interrupts that do not require
         * firmware calls for masking and unmasking
         */
        if (xd->flags & XIVE_IRQ_FLAG_MASK_FW)
                return -EIO;

        /*
         * This is called by KVM with state non-NULL for enabling
         * pass-through or NULL for disabling it
         */
        if (state) {
                irqd_set_forwarded_to_vcpu(d);

                /* Set it to PQ=10 state to prevent further sends */
                pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10);

                /* No target ? nothing to do */
                if (xd->target == XIVE_INVALID_TARGET) {
                        /*
                         * An untargetted interrupt should have been
                         * also masked at the source
                         */
                        WARN_ON(pq & 2);

                        return 0;
                }

                /*
                 * If P was set, the interrupt is in flight to a host
                 * queue or has been fetched but not yet EOIed, so it
                 * is potentially occupying a host queue slot.
                 *
                 * In that case, set PQ to 11 so a resend happens for
                 * the interrupt to reach the guest, and remember the
                 * P state in saved_p. As long as saved_p is set, we
                 * must not hard-unmask the interrupt when "returning"
                 * it to the host; saved_p is cleared by the host EOI,
                 * when we know for sure the queue slot is free again.
                 */
                if (pq & 2) {
                        pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
                        xd->saved_p = true;

                        /*
                         * Sync the XIVE source HW to ensure the
                         * interrupt has gone through the EAS before we
                         * change its routing to the guest.
                         */
                        if (xive_ops->sync_source)
                                xive_ops->sync_source(hw_irq);
                } else
                        xd->saved_p = false;
        } else {
                irqd_clr_forwarded_to_vcpu(d);

                /* No host target ? hard mask and return */
                if (xd->target == XIVE_INVALID_TARGET) {
                        xive_do_source_set_mask(xd, true);
                        return 0;
                }

                /*
                 * Sync the XIVE source HW to ensure the interrupt
                 * has gone through the EAS before we change its
                 * routing back to the host.
                 */
                if (xive_ops->sync_source)
                        xive_ops->sync_source(hw_irq);

                /*
                 * By convention we are called with the interrupt in
                 * a PQ=10 or PQ=11 state, ie, it won't fire and will
                 * have latched in Q whether there's a pending HW
                 * interrupt or not.
                 *
                 * First reconfigure the target.
                 */
                rc = xive_ops->configure_irq(hw_irq,
                                             get_hard_smp_processor_id(xd->target),
                                             xive_irq_priority, d->irq);
                if (rc)
                        return rc;

                /*
                 * Then, if saved_p is not set, effectively re-enable
                 * the interrupt with an EOI which will also retrigger
                 * it if Q was set.
                 *
                 * If saved_p is set, the interrupt is still occupying
                 * a host queue slot, so keep it masked (PQ=10/11); it
                 * will be unmasked/retriggered when the host does the
                 * final EOI.
                 */
                if (!xd->saved_p)
                        xive_do_source_eoi(hw_irq, xd);
        }
        return 0;
}

static struct irq_chip xive_irq_chip = {
        .name = "XIVE-IRQ",
        .irq_startup = xive_irq_startup,
        .irq_shutdown = xive_irq_shutdown,
        .irq_eoi = xive_irq_eoi,
        .irq_mask = xive_irq_mask,
        .irq_unmask = xive_irq_unmask,
        .irq_set_affinity = xive_irq_set_affinity,
        .irq_set_type = xive_irq_set_type,
        .irq_retrigger = xive_irq_retrigger,
        .irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
};

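/* Test whether an irq_chip belongs to this XIVE driver */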
bool is_xive_irq(struct irq_chip *chip)
{
        return chip == &xive_irq_chip;
}
EXPORT_SYMBOL_GPL(is_xive_irq);

void xive_cleanup_irq_data(struct xive_irq_data *xd)
{
        if (xd->eoi_mmio) {
                iounmap(xd->eoi_mmio);
                if (xd->eoi_mmio == xd->trig_mmio)
                        xd->trig_mmio = NULL;
                xd->eoi_mmio = NULL;
        }
        if (xd->trig_mmio) {
                iounmap(xd->trig_mmio);
                xd->trig_mmio = NULL;
        }
}
EXPORT_SYMBOL_GPL(xive_cleanup_irq_data);

static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
{
        struct xive_irq_data *xd;
        int rc;

        xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL);
        if (!xd)
                return -ENOMEM;
        rc = xive_ops->populate_irq_data(hw, xd);
        if (rc) {
                kfree(xd);
                return rc;
        }
        xd->target = XIVE_INVALID_TARGET;
        irq_set_handler_data(virq, xd);

        return 0;
}

static void xive_irq_free_data(unsigned int virq)
{
        struct xive_irq_data *xd = irq_get_handler_data(virq);

        if (!xd)
                return;
        irq_set_handler_data(virq, NULL);
        xive_cleanup_irq_data(xd);
        kfree(xd);
}

#ifdef CONFIG_SMP

static void xive_cause_ipi(int cpu)
{
        struct xive_cpu *xc;
        struct xive_irq_data *xd;

        xc = per_cpu(xive_cpu, cpu);

        DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n",
                    smp_processor_id(), cpu, xc->hw_ipi);

        xd = &xc->ipi_data;
        if (WARN_ON(!xd->trig_mmio))
                return;
        out_be64(xd->trig_mmio, 0);
}

static irqreturn_t xive_muxed_ipi_action(int irq, void *dev_id)
{
        return smp_ipi_demux();
}

static void xive_ipi_eoi(struct irq_data *d)
{
        struct xive_cpu *xc = __this_cpu_read(xive_cpu);

        /* Handle possible race with unplug and drop stale IPIs */
        if (!xc)
                return;

        DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
                    d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);

        xive_do_source_eoi(xc->hw_ipi, &xc->ipi_data);
        xive_do_queue_eoi(xc);
}

static void xive_ipi_do_nothing(struct irq_data *d)
{
        /*
         * Nothing to do, we never mask/unmask IPIs, but the callback
         * has to exist for the struct irq_chip.
         */
}

static struct irq_chip xive_ipi_chip = {
        .name = "XIVE-IPI",
        .irq_eoi = xive_ipi_eoi,
        .irq_mask = xive_ipi_do_nothing,
        .irq_unmask = xive_ipi_do_nothing,
};

static void __init xive_request_ipi(void)
{
        unsigned int virq;

        /*
         * Initialization failed, move on, we might manage to
         * reach the point where we display our errors before
         * the system falls apart.
         */
        if (!xive_irq_domain)
                return;

        /* Map and request the IPI (HW number 0) */
        virq = irq_create_mapping(xive_irq_domain, 0);
        xive_ipi_irq = virq;

        WARN_ON(request_irq(virq, xive_muxed_ipi_action,
                            IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
}

static int xive_setup_cpu_ipi(unsigned int cpu)
{
        struct xive_cpu *xc;
        int rc;

        pr_debug("Setting up IPI for CPU %d\n", cpu);

        xc = per_cpu(xive_cpu, cpu);

        /* Check if we are already setup */
        if (xc->hw_ipi != 0)
                return 0;

        /* Grab an IPI from the backend, this will populate xc->hw_ipi */
        if (xive_ops->get_ipi(cpu, xc))
                return -EIO;

        /*
         * Populate the IRQ data in the xive_cpu structure and
         * configure the HW / enable the IPIs.
         */
        rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data);
        if (rc) {
                pr_err("Failed to populate IPI data on CPU %d\n", cpu);
                return -EIO;
        }
        rc = xive_ops->configure_irq(xc->hw_ipi,
                                     get_hard_smp_processor_id(cpu),
                                     xive_irq_priority, xive_ipi_irq);
        if (rc) {
                pr_err("Failed to map IPI CPU %d\n", cpu);
                return -EIO;
        }
        pr_devel("CPU %d HW IPI %x, virq %d, trig_mmio=%p\n", cpu,
                 xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);

        /* Unmask it */
        xive_do_source_set_mask(&xc->ipi_data, false);

        return 0;
}

static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
{
        /* Disable the IPI and free the IRQ data */

        /* Already cleaned up ? */
        if (xc->hw_ipi == 0)
                return;

        /* Mask the IPI */
        xive_do_source_set_mask(&xc->ipi_data, true);

        /*
         * Note: We don't call xive_cleanup_irq_data() to free the
         * mappings, as this path can run in contexts where calling
         * iounmap() is not safe.
         */

        /* Deconfigure/mask in the backend */
        xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(),
                                0xff, xive_ipi_irq);

        /* Free the IPI in the backend */
        xive_ops->put_ipi(cpu, xc);
}

void __init xive_smp_probe(void)
{
        smp_ops->cause_ipi = xive_cause_ipi;

        /* Register the IPI */
        xive_request_ipi();

        /* Allocate and setup the IPI for the boot CPU */
        xive_setup_cpu_ipi(smp_processor_id());
}

#endif /* CONFIG_SMP */

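/* irq_domain callback: bind a Linux virq to a HW interrupt number */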
static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
                               irq_hw_number_t hw)
{
        int rc;

        /*
         * Mark interrupts as edge sensitive by default so that resend
         * actually works. Will fix that up below if needed.
         */
        irq_clear_status_flags(virq, IRQ_LEVEL);

#ifdef CONFIG_SMP
        /* IPIs are special and come up with HW number 0 */
        if (hw == 0) {
                /*
                 * IPIs are marked per-cpu. We use separate HW interrupts
                 * under the hood but associated with the same "linux"
                 * interrupt.
                 */
                irq_set_chip_and_handler(virq, &xive_ipi_chip,
                                         handle_percpu_irq);
                return 0;
        }
#endif

        rc = xive_irq_alloc_data(virq, hw);
        if (rc)
                return rc;

        irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq);

        return 0;
}

static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
{
        struct irq_data *data = irq_get_irq_data(virq);
        unsigned int hw_irq;

        if (!data)
                return;
        hw_irq = (unsigned int)irqd_to_hwirq(data);
        /* IPIs (HW number 0) have no per-irq data to free */
        if (hw_irq)
                xive_irq_free_data(virq);
}

static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct,
                                 const u32 *intspec, unsigned int intsize,
                                 irq_hw_number_t *out_hwirq, unsigned int *out_flags)

{
        *out_hwirq = intspec[0];

        /*
         * If intsize is at least 2, we check the second value as being
         * the trigger type. Otherwise, default to level low.
         */
        if (intsize > 1) {
                if (intspec[1] & 1)
                        *out_flags = IRQ_TYPE_LEVEL_LOW;
                else
                        *out_flags = IRQ_TYPE_EDGE_RISING;
        } else
                *out_flags = IRQ_TYPE_LEVEL_LOW;

        return 0;
}

static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node,
                                 enum irq_domain_bus_token bus_token)
{
        return xive_ops->match(node);
}

static const struct irq_domain_ops xive_irq_domain_ops = {
        .match = xive_irq_domain_match,
        .map = xive_irq_domain_map,
        .unmap = xive_irq_domain_unmap,
        .xlate = xive_irq_domain_xlate,
};

static void __init xive_init_host(void)
{
        xive_irq_domain = irq_domain_add_nomap(NULL, XIVE_MAX_IRQ,
                                               &xive_irq_domain_ops, NULL);
        if (WARN_ON(xive_irq_domain == NULL))
                return;
        irq_set_default_host(xive_irq_domain);
}

static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
        if (xc->queue[xive_irq_priority].qpage)
                xive_ops->cleanup_queue(cpu, xc, xive_irq_priority);
}

static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
        int rc = 0;

        /* Only one queue (one priority) is used for now */
        if (!xc->queue[xive_irq_priority].qpage)
                rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority);

        return rc;
}

static int xive_prepare_cpu(unsigned int cpu)
{
        struct xive_cpu *xc;

        xc = per_cpu(xive_cpu, cpu);
        if (!xc) {
                struct device_node *np;

                xc = kzalloc_node(sizeof(struct xive_cpu),
                                  GFP_KERNEL, cpu_to_node(cpu));
                if (!xc)
                        return -ENOMEM;
                np = of_get_cpu_node(cpu, NULL);
                if (np)
                        xc->chip_id = of_get_ibm_chip_id(np);
                of_node_put(np);

                per_cpu(xive_cpu, cpu) = xc;
        }

        /* Setup EQs if not already done */
        return xive_setup_cpu_queues(cpu, xc);
}

static void xive_setup_cpu(void)
{
        struct xive_cpu *xc = __this_cpu_read(xive_cpu);

        /* The backend might have additional things to do */
        if (xive_ops->setup_cpu)
                xive_ops->setup_cpu(smp_processor_id(), xc);

        /* Set CPPR to 0xff to enable flow of interrupts */
        xc->cppr = 0xff;
        out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}

#ifdef CONFIG_SMP
void xive_smp_setup_cpu(void)
{
        pr_devel("SMP setup CPU %d\n", smp_processor_id());

        /* This will have already been done on the boot CPU */
        if (smp_processor_id() != boot_cpuid)
                xive_setup_cpu();
}

int xive_smp_prepare_cpu(unsigned int cpu)
{
        int rc;

        /* Allocate per-CPU data and queues */
        rc = xive_prepare_cpu(cpu);
        if (rc)
                return rc;

        /* Allocate and setup the IPI for the new CPU */
        return xive_setup_cpu_ipi(cpu);
}

#ifdef CONFIG_HOTPLUG_CPU
static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
{
        u32 irq;

        /* We assume local irqs are disabled */
        WARN_ON(!irqs_disabled());

        /* Check what's already in the CPU queue */
        while ((irq = xive_scan_interrupts(xc, false)) != 0) {
                /*
                 * We need to re-route that interrupt to its new
                 * destination. First get and lock the descriptor.
                 */
                struct irq_desc *desc = irq_to_desc(irq);
                struct irq_data *d = irq_desc_get_irq_data(desc);
                struct xive_irq_data *xd;
                unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

                /*
                 * Ignore anything that isn't a XIVE irq and ignore
                 * IPIs, which can simply be dropped.
                 */
                if (d->domain != xive_irq_domain || hw_irq == 0)
                        continue;

                /*
                 * The IRQ should have already been re-routed, it's just
                 * a stale entry in the old queue, so re-trigger it in
                 * order to make it reach its new destination.
                 */
#ifdef DEBUG_FLUSH
                pr_info("CPU %d: Got irq %d while offline, re-sending...\n",
                        cpu, irq);
#endif
                raw_spin_lock(&desc->lock);
                xd = irq_desc_get_handler_data(desc);

                /*
                 * For LSIs, we EOI, which will cause a resend if the
                 * line is still asserted. Otherwise do an MSI retrigger.
                 */
                if (xd->flags & XIVE_IRQ_FLAG_LSI)
                        xive_do_source_eoi(irqd_to_hwirq(d), xd);
                else
                        xive_irq_retrigger(d);

                raw_spin_unlock(&desc->lock);
        }
}

void xive_smp_disable_cpu(void)
{
        struct xive_cpu *xc = __this_cpu_read(xive_cpu);
        unsigned int cpu = smp_processor_id();

        /* Migrate interrupts away from the CPU */
        irq_migrate_all_off_this_cpu();

        /* Set CPPR to 0 to disable flow of interrupts */
        xc->cppr = 0;
        out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

        /* Flush everything still in the queue */
        xive_flush_cpu_queue(cpu, xc);

        /* Re-enable CPPR */
        xc->cppr = 0xff;
        out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}

void xive_flush_interrupt(void)
{
        struct xive_cpu *xc = __this_cpu_read(xive_cpu);
        unsigned int cpu = smp_processor_id();

        /* Called if an interrupt occurs while the CPU is hot unplugged */
        xive_flush_cpu_queue(cpu, xc);
}

#endif /* CONFIG_HOTPLUG_CPU */

#endif /* CONFIG_SMP */

void xive_teardown_cpu(void)
{
        struct xive_cpu *xc = __this_cpu_read(xive_cpu);
        unsigned int cpu = smp_processor_id();

        /* Set CPPR to 0 to disable flow of interrupts */
        xc->cppr = 0;
        out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

        if (xive_ops->teardown_cpu)
                xive_ops->teardown_cpu(cpu, xc);

#ifdef CONFIG_SMP
        /* Get rid of the IPI */
        xive_cleanup_cpu_ipi(cpu, xc);
#endif

        /* Disable and free the queues */
        xive_cleanup_cpu_queues(cpu, xc);
}

void xive_shutdown(void)
{
        xive_ops->shutdown();
}

bool __init xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
                           u8 max_prio)
{
        xive_tima = area;
        xive_tima_offset = offset;
        xive_ops = ops;
        xive_irq_priority = max_prio;

        ppc_md.get_irq = xive_get_irq;
        __xive_enabled = true;

        pr_devel("Initializing host..\n");
        xive_init_host();

        pr_devel("Initializing boot CPU..\n");

        /* Allocate per-CPU data and queues */
        xive_prepare_cpu(smp_processor_id());

        /* Get ready for interrupts */
        xive_setup_cpu();

        pr_info("Interrupt handling initialized with %s backend\n",
                xive_ops->name);
        pr_info("Using priority %d for all interrupts\n", max_prio);

        return true;
}

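/*
 * Allocate a zeroed, node-local buffer of 2^queue_shift bytes for use
 * as an event queue page. Shared by the backends.
 */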
__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
{
        unsigned int alloc_order;
        struct page *pages;
        __be32 *qpage;

        alloc_order = xive_alloc_order(queue_shift);
        pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
        if (!pages)
                return ERR_PTR(-ENOMEM);
        qpage = (__be32 *)page_address(pages);
        memset(qpage, 0, 1 << queue_shift);

        return qpage;
}

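/* "xive=off" command line option: disable the XIVE interrupt driver */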
static int __init xive_off(char *arg)
{
        xive_cmdline_disabled = true;
        return 0;
}
__setup("xive=off", xive_off);