/*
 * Common core for the XIVE interrupt controller (the POWER9
 * "eXternal Interrupt Virtualization Engine"). Platform-specific
 * backends plug in through the xive_ops structure.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/msi.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/xmon.h>

#include "xive-internal.h"

#undef DEBUG_FLUSH
#undef DEBUG_ALL

#ifdef DEBUG_ALL
#define DBG_VERBOSE(fmt, ...)   pr_devel("cpu %d - " fmt, \
                                         smp_processor_id(), ## __VA_ARGS__)
#else
#define DBG_VERBOSE(fmt...)     do { } while (0)
#endif

bool __xive_enabled;
EXPORT_SYMBOL_GPL(__xive_enabled);
bool xive_cmdline_disabled;

/* We use only one priority for now */
static u8 xive_irq_priority;

/* TIMA (Thread Interrupt Management Area), exported to KVM */
void __iomem *xive_tima;
EXPORT_SYMBOL_GPL(xive_tima);
u32 xive_tima_offset;

/* Backend ops */
static const struct xive_ops *xive_ops;

/* Our global interrupt domain */
static struct irq_domain *xive_irq_domain;

#ifdef CONFIG_SMP
/* The IPIs all use the same logical irq number */
static u32 xive_ipi_irq;
#endif

/* Xive state for each CPU */
static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);

/*
 * A "disabled" interrupt should never fire; to catch problems
 * we set its logical number to this
 */
#define XIVE_BAD_IRQ            0x7fffffff
#define XIVE_MAX_IRQ            (XIVE_BAD_IRQ - 1)

/* An invalid CPU target */
#define XIVE_INVALID_TARGET     (-1)

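/*
 * Read the next entry in a queue, return its content if it's valid
 * or 0 if there is no new entry.
 *
 * The queue pointer is moved forward unless "just_peek" is set.
 */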
static u32 xive_read_eq(struct xive_q *q, bool just_peek)
{
        u32 cur;

        if (!q->qpage)
                return 0;
        cur = be32_to_cpup(q->qpage + q->idx);

        /* Check valid bit (31) vs current toggle polarity */
        if ((cur >> 31) == q->toggle)
                return 0;

        /* If consuming from the queue ... */
        if (!just_peek) {
                /* Next entry */
                q->idx = (q->idx + 1) & q->msk;

                /* Wrap around: flip valid toggle */
                if (q->idx == 0)
                        q->toggle ^= 1;
        }
        /* Mask out the valid bit (31) */
        return cur & 0x7fffffff;
}

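/*
 * Scans all the queues that may have interrupts in them
 * (based on "pending_prio") in priority order until an
 * interrupt is found or all the priorities are empty.
 *
 * Then updates the CPPR (Current Processor Priority
 * Register) based on the most favored interrupt found
 * (0xff if none) and returns what was found (0 if none).
 *
 * If just_peek is set, returns the most favored pending
 * interrupt if any, but doesn't move the queue pointers.
 *
 * Note: This function can operate generically on any number
 * of queues (up to 8). The current implementation of the XIVE
 * driver only uses a single queue however.
 *
 * Note2: This will also "flush" the "pending_count" of a queue
 * into the "count" when that queue is observed to be empty.
 * This is used to keep track of the amount of interrupts
 * targeting a queue. When an interrupt is moved away from a
 * queue, we only decrement that queue's count once the queue
 * has been observed empty, to avoid races.
 */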
static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
{
        u32 irq = 0;
        u8 prio;

        /* Find highest pending priority */
        while (xc->pending_prio != 0) {
                struct xive_q *q;

                prio = ffs(xc->pending_prio) - 1;
                DBG_VERBOSE("scan_irq: trying prio %d\n", prio);

                /* Try to fetch from the queue */
                irq = xive_read_eq(&xc->queue[prio], just_peek);

                /* Found something ? That's it */
                if (irq)
                        break;

                /* Clear pending bits */
                xc->pending_prio &= ~(1 << prio);

                /*
                 * Check if the queue count needs adjusting due to
                 * interrupts being moved away. See description of
                 * xive_dec_target_count()
                 */
                q = &xc->queue[prio];
                if (atomic_read(&q->pending_count)) {
                        int p = atomic_xchg(&q->pending_count, 0);
                        if (p) {
                                WARN_ON(p > atomic_read(&q->count));
                                atomic_sub(p, &q->count);
                        }
                }
        }

        /* If nothing was found, set CPPR to 0xff (no interrupt masked) */
        if (irq == 0)
                prio = 0xff;

        /* Update HW CPPR to match if necessary */
        if (prio != xc->cppr) {
                DBG_VERBOSE("scan_irq: adjusting CPPR to %d\n", prio);
                xc->cppr = prio;
                out_8(xive_tima + xive_tima_offset + TM_CPPR, prio);
        }

        return irq;
}

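/*
 * ESB (Event State Buffer) accessors. Loads and stores at special
 * offsets of an interrupt source's ESB MMIO page have side effects
 * such as EOI or updating the PQ state bits (see xive-regs.h).
 */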
static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
{
        u64 val;

        /* Handle HW errata */
        if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
                offset |= offset << 4;

        if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
                val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0);
        else
                val = in_be64(xd->eoi_mmio + offset);

        return (u8)val;
}

static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data)
{
        /* Handle HW errata */
        if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
                offset |= offset << 4;

        if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
                xive_ops->esb_rw(xd->hw_irq, offset, data, 1);
        else
                out_be64(xd->eoi_mmio + offset, data);
}

#ifdef CONFIG_XMON
static notrace void xive_dump_eq(const char *name, struct xive_q *q)
{
        u32 i0, i1, idx;

        if (!q->qpage)
                return;
        idx = q->idx;
        i0 = be32_to_cpup(q->qpage + idx);
        idx = (idx + 1) & q->msk;
        i1 = be32_to_cpup(q->qpage + idx);
        xmon_printf(" %s Q T=%d %08x %08x ...\n", name,
                    q->toggle, i0, i1);
}

notrace void xmon_xive_do_dump(int cpu)
{
        struct xive_cpu *xc = per_cpu(xive_cpu, cpu);

        xmon_printf("XIVE state for CPU %d:\n", cpu);
        xmon_printf(" pp=%02x cppr=%02x\n", xc->pending_prio, xc->cppr);
        xive_dump_eq("IRQ", &xc->queue[xive_irq_priority]);
#ifdef CONFIG_SMP
        {
                u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);
                xmon_printf(" IPI state: %x:%c%c\n", xc->hw_ipi,
                            val & XIVE_ESB_VAL_P ? 'P' : 'p',
                            val & XIVE_ESB_VAL_Q ? 'Q' : 'q');
        }
#endif
}
#endif /* CONFIG_XMON */

static unsigned int xive_get_irq(void)
{
        struct xive_cpu *xc = __this_cpu_read(xive_cpu);
        u32 irq;

        /*
         * This can be called either as a result of a HW interrupt or
         * as a "replay" because EOI decided there was still something
         * in one of the queues.
         *
         * First we perform an ACK cycle in order to update our mask
         * of pending priorities. This will also have the effect of
         * updating the CPPR to the most favored pending interrupt.
         *
         * In the future, if we have a way to differentiate a first
         * entry (on HW interrupt) from a replay triggered by EOI,
         * we could skip this on replays unless the soft-mask state
         * tells us that a new HW interrupt occurred.
         */
        xive_ops->update_pending(xc);

        DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio);

        /* Scan our queue(s) for interrupts */
        irq = xive_scan_interrupts(xc, false);

        DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n",
            irq, xc->pending_prio);

        /* Return pending interrupt if any */
        if (irq == XIVE_BAD_IRQ)
                return 0;
        return irq;
}

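/*
 * After EOI'ing an interrupt, we need to re-check the queue
 * to see if another interrupt is pending, since multiple
 * interrupts can coalesce into a single notification to the
 * CPU.
 *
 * If we find that there is indeed more in there, we flag it for
 * replay, which makes Linux synthesize a new external interrupt
 * on the next local_irq_restore().
 */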
static void xive_do_queue_eoi(struct xive_cpu *xc)
{
        if (xive_scan_interrupts(xc, true) != 0) {
                DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio);
                force_external_irq_replay();
        }
}

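/*
 * EOI an interrupt at the source. There are several methods
 * to do this depending on the HW version and source type.
 */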
static void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
{
        /* If the XIVE supports the new "store EOI" facility, use it */
        if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
                xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
        else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
                /*
                 * The FW told us to call it. This happens for some
                 * interrupt sources that need additional HW whacking
                 * beyond the ESB manipulation. The firmware will
                 * take care of it.
                 */
                if (WARN_ON_ONCE(!xive_ops->eoi))
                        return;
                xive_ops->eoi(hw_irq);
        } else {
                u8 eoi_val;

                /*
                 * Otherwise, we use the special MMIO load that clears
                 * both P and Q and returns the old Q, except for LSIs
                 * where we use the "EOI cycle" special load instead.
                 *
                 * This allows us to then do a re-trigger if Q was set,
                 * rather than synthesizing an interrupt in software.
                 */
                if (xd->flags & XIVE_IRQ_FLAG_LSI)
                        xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
                else {
                        eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
                        DBG_VERBOSE("eoi_val=%x\n", eoi_val);

                        /* Re-trigger if needed */
                        if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
                                out_be64(xd->trig_mmio, 0);
                }
        }
}

/* irq_chip EOI callback */
static void xive_irq_eoi(struct irq_data *d)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
        struct xive_cpu *xc = __this_cpu_read(xive_cpu);

        DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
                    d->irq, irqd_to_hwirq(d), xc->pending_prio);

        /*
         * EOI the source if it hasn't been disabled and hasn't
         * been passed-through to a KVM guest
         */
        if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d) &&
            !(xd->flags & XIVE_IRQ_NO_EOI))
                xive_do_source_eoi(irqd_to_hwirq(d), xd);

        /*
         * Clear saved_p to indicate that it's no longer occupying
         * a queue slot on the target queue
         */
        xd->saved_p = false;

        /* Check for more work in the queue */
        xive_do_queue_eoi(xc);
}

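/*
 * Helper used to mask and unmask an interrupt source. This
 * is only called for normal interrupts that do not require
 * masking/unmasking via firmware.
 */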
static void xive_do_source_set_mask(struct xive_irq_data *xd,
                                    bool mask)
{
        u64 val;

        /*
         * If the interrupt had P set, it may be in a queue.
         *
         * We need to make sure we don't re-enable it until it
         * has been fetched from that queue and EOId. We keep
         * a copy of that P state and use it to restore the
         * ESB bits on unmask.
         */
        if (mask) {
                val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
                xd->saved_p = !!(val & XIVE_ESB_VAL_P);
        } else if (xd->saved_p)
                xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
        else
                xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
}

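/*
 * Try to choose "cpu" as a new interrupt target. Increments
 * the queue accounting for that target if it's not already
 * full.
 */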
static bool xive_try_pick_target(int cpu)
{
        struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
        struct xive_q *q = &xc->queue[xive_irq_priority];
        int max;

        /*
         * Calculate the max number of interrupts in that queue.
         *
         * We leave a gap of one entry just in case.
         */
        max = (q->msk + 1) - 1;
        return !!atomic_add_unless(&q->count, 1, max);
}

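/*
 * Un-account an interrupt for a target CPU. We don't directly
 * decrement q->count since the interrupt might still be present
 * in the queue.
 *
 * Instead increment a separate counter "pending_count" which
 * will be subtracted from "count" when the queue is observed
 * to be empty (in xive_scan_interrupts()).
 */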
static void xive_dec_target_count(int cpu)
{
        struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
        struct xive_q *q;

        if (WARN_ON(cpu < 0 || !xc)) {
                pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc);
                return;
        }
        q = &xc->queue[xive_irq_priority];

        /*
         * We increment the "pending count", which will be used
         * to decrement the target queue count whenever it's next
         * processed and found empty. This ensures that we don't
         * decrement while we still have the interrupt there
         * occupying a slot.
         */
        atomic_inc(&q->pending_count);
}

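/* Find a tentative CPU target in a CPU mask */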
static int xive_find_target_in_mask(const struct cpumask *mask,
                                    unsigned int fuzz)
{
        int cpu, first, num, i;

        /* Pick up a starting point CPU in the mask based on fuzz */
        num = min_t(int, cpumask_weight(mask), nr_cpu_ids);
        first = fuzz % num;

        /* Locate it */
        cpu = cpumask_first(mask);
        for (i = 0; i < first && cpu < nr_cpu_ids; i++)
                cpu = cpumask_next(cpu, mask);

        /* Sanity check */
        if (WARN_ON(cpu >= nr_cpu_ids))
                cpu = cpumask_first(cpu_online_mask);

        /* Remember the first one to handle wrap-around */
        first = cpu;

        /*
         * Now go through the entire mask until we find a valid
         * target.
         */
        for (;;) {
                /*
                 * We re-check online as the fallback case passes us
                 * an untested affinity mask
                 */
                if (cpu_online(cpu) && xive_try_pick_target(cpu))
                        return cpu;
                cpu = cpumask_next(cpu, mask);
                if (cpu == first)
                        break;
                /* Wrap around */
                if (cpu >= nr_cpu_ids)
                        cpu = cpumask_first(mask);
        }
        return -1;
}

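/*
 * Pick a target CPU for an interrupt. This is done at
 * startup or if the affinity is changed in a way that
 * invalidates the current target.
 */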
static int xive_pick_irq_target(struct irq_data *d,
                                const struct cpumask *affinity)
{
        static unsigned int fuzz;
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
        cpumask_var_t mask;
        int cpu = -1;

        /*
         * If we have chip IDs, first we try to build a mask of
         * CPUs matching the source chip and find a target in there
         */
        if (xd->src_chip != XIVE_INVALID_CHIP_ID &&
            zalloc_cpumask_var(&mask, GFP_ATOMIC)) {
                /* Build a mask of matching chip IDs */
                for_each_cpu_and(cpu, affinity, cpu_online_mask) {
                        struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
                        if (xc->chip_id == xd->src_chip)
                                cpumask_set_cpu(cpu, mask);
                }
                /* Try to find a target */
                if (cpumask_empty(mask))
                        cpu = -1;
                else
                        cpu = xive_find_target_in_mask(mask, fuzz++);
                free_cpumask_var(mask);
                if (cpu >= 0)
                        return cpu;
                fuzz--;
        }

        /* No chip IDs, fall back to using the affinity mask */
        return xive_find_target_in_mask(affinity, fuzz++);
}

static unsigned int xive_irq_startup(struct irq_data *d)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
        unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
        int target, rc;

        pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n",
                 d->irq, hw_irq, d);

#ifdef CONFIG_PCI_MSI
        /*
         * The generic MSI code returns with the interrupt disabled
         * on the card, using the MSI mask bits. Firmware doesn't
         * appear to unmask at that level, so we do it here by hand.
         */
        if (irq_data_get_msi_desc(d))
                pci_msi_unmask_irq(d);
#endif

        /* Pick a target */
        target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d));
        if (target == XIVE_INVALID_TARGET) {
                /* Try again breaking affinity */
                target = xive_pick_irq_target(d, cpu_online_mask);
                if (target == XIVE_INVALID_TARGET)
                        return -ENXIO;
                pr_warn("irq %d started with broken affinity\n", d->irq);
        }

        /* Sanity check */
        if (WARN_ON(target == XIVE_INVALID_TARGET ||
                    target >= nr_cpu_ids))
                target = smp_processor_id();

        xd->target = target;

        /*
         * Configure the logical number to be the Linux IRQ number
         * and set the target queue
         */
        rc = xive_ops->configure_irq(hw_irq,
                                     get_hard_smp_processor_id(target),
                                     xive_irq_priority, d->irq);
        if (rc)
                return rc;

        /* Unmask the ESB */
        xive_do_source_set_mask(xd, false);

        return 0;
}

static void xive_irq_shutdown(struct irq_data *d)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
        unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

        pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n",
                 d->irq, hw_irq, d);

        if (WARN_ON(xd->target == XIVE_INVALID_TARGET))
                return;

        /* Mask the interrupt at the source */
        xive_do_source_set_mask(xd, true);

        /*
         * The above may have set saved_p. We clear it otherwise it
         * will prevent re-enabling later on. It is ok to forget the
         * fact that the interrupt might be in a queue because we are
         * accounting that already in xive_dec_target_count() and will
         * be re-routing it to a new queue with proper accounting when
         * it's started up again
         */
        xd->saved_p = false;

        /*
         * Mask the interrupt in HW in the IVT/EAS and set the number
         * to be the "bad" IRQ number
         */
        xive_ops->configure_irq(hw_irq,
                                get_hard_smp_processor_id(xd->target),
                                0xff, XIVE_BAD_IRQ);

        xive_dec_target_count(xd->target);
        xd->target = XIVE_INVALID_TARGET;
}

static void xive_irq_unmask(struct irq_data *d)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

        pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd);

        /*
         * Some sources cannot be unmasked via their ESB pages and
         * carry the MASK_FW flag. For these we ask the firmware to
         * re-configure the interrupt, which has the side effect of
         * restoring its target and priority.
         */
        if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
                unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
                xive_ops->configure_irq(hw_irq,
                                        get_hard_smp_processor_id(xd->target),
                                        xive_irq_priority, d->irq);
                return;
        }

        xive_do_source_set_mask(xd, false);
}

static void xive_irq_mask(struct irq_data *d)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

        pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd);

        /*
         * Same workaround as above: sources that must be masked by
         * firmware are re-configured with priority 0xff, which masks
         * them at the HW level.
         */
        if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
                unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
                xive_ops->configure_irq(hw_irq,
                                        get_hard_smp_processor_id(xd->target),
                                        0xff, d->irq);
                return;
        }

        xive_do_source_set_mask(xd, true);
}

static int xive_irq_set_affinity(struct irq_data *d,
                                 const struct cpumask *cpumask,
                                 bool force)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
        unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
        u32 target, old_target;
        int rc = 0;

        pr_devel("xive_irq_set_affinity: irq %d\n", d->irq);

        /* Is this valid ? */
        if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
                return -EINVAL;

        /* Don't do anything if the interrupt isn't started */
        if (!irqd_is_started(d))
                return IRQ_SET_MASK_OK;

        /*
         * If the existing target is already in the new mask and is
         * online then do nothing.
         */
        if (xd->target != XIVE_INVALID_TARGET &&
            cpu_online(xd->target) &&
            cpumask_test_cpu(xd->target, cpumask))
                return IRQ_SET_MASK_OK;

        /* Pick a new target */
        target = xive_pick_irq_target(d, cpumask);

        /* No target found */
        if (target == XIVE_INVALID_TARGET)
                return -ENXIO;

        /* Sanity check */
        if (WARN_ON(target >= nr_cpu_ids))
                target = smp_processor_id();

        old_target = xd->target;

        /*
         * Only configure the irq if it's not currently passed-through
         * to a KVM guest
         */
        if (!irqd_is_forwarded_to_vcpu(d))
                rc = xive_ops->configure_irq(hw_irq,
                                             get_hard_smp_processor_id(target),
                                             xive_irq_priority, d->irq);
        if (rc < 0) {
                pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
                return rc;
        }

        pr_devel(" target: 0x%x\n", target);
        xd->target = target;

        /* Give up previous target */
        if (old_target != XIVE_INVALID_TARGET)
                xive_dec_target_count(old_target);

        return IRQ_SET_MASK_OK;
}

static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

        /*
         * We only support these. This has really no effect other than
         * setting the corresponding descriptor bits, mind you, but those
         * will in turn affect the resend function when re-enabling an
         * edge interrupt.
         *
         * Set the default to edge as explained in map().
         */
        if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
                flow_type = IRQ_TYPE_EDGE_RISING;

        if (flow_type != IRQ_TYPE_EDGE_RISING &&
            flow_type != IRQ_TYPE_LEVEL_LOW)
                return -EINVAL;

        irqd_set_trigger_type(d, flow_type);

        /*
         * Double check it matches what the FW thinks
         *
         * NOTE: We don't know yet if the PAPR interface will provide
         * the LSI vs MSI information apart from the device-tree so
         * this check might have to move into an optional backend call
         */
        if ((flow_type == IRQ_TYPE_LEVEL_LOW) !=
            !!(xd->flags & XIVE_IRQ_FLAG_LSI)) {
                pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n",
                        d->irq, (u32)irqd_to_hwirq(d),
                        (flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge",
                        (xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge");
        }

        return IRQ_SET_MASK_OK_NOCOPY;
}

static int xive_irq_retrigger(struct irq_data *d)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

        /* This should be only for MSIs */
        if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
                return 0;

        /*
         * To perform a retrigger, we first set the PQ bits to
         * 11, then perform an EOI.
         */
        xive_esb_read(xd, XIVE_ESB_SET_PQ_11);

        /*
         * Note: We pass "0" to the hw_irq argument in order to
         * avoid calling into the backend EOI code which we don't
         * want to do in the case of a re-trigger. Backends typically
         * only do EOI for LSIs anyway.
         */
        xive_do_source_eoi(0, xd);

        return 1;
}

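/*
 * Called by KVM via irq_set_vcpu_affinity() with a non-NULL state
 * to pass an interrupt source through to a guest vCPU, or with a
 * NULL state to take it back under host control.
 */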
static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
{
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
        unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
        int rc;
        u8 pq;

        /*
         * We only support this on interrupts that do not require
         * firmware calls for masking and unmasking
         */
        if (xd->flags & XIVE_IRQ_FLAG_MASK_FW)
                return -EIO;

        /*
         * This is called by KVM with state non-NULL for enabling
         * pass-through or NULL for disabling it
         */
        if (state) {
                irqd_set_forwarded_to_vcpu(d);

                /* Set it to PQ=10 state to prevent further sends */
                pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10);

                /* No target ? nothing to do */
                if (xd->target == XIVE_INVALID_TARGET) {
                        /*
                         * An untargetted interrupt should have been
                         * also masked at the source
                         */
                        WARN_ON(pq & 2);

                        return 0;
                }

                /*
                 * If P was set, adjust the state to PQ=11 to indicate
                 * that a resend is needed for the interrupt to reach
                 * the guest. Also remember the value of P.
                 *
                 * This also tells us that it's in flight to a host queue
                 * or has already been fetched but hasn't been EOIed yet
                 * by the host. It is thus potentially using up a host
                 * queue slot. This is important to know because as long
                 * as this is the case, we must not hard-unmask it when
                 * "returning" that interrupt to the host.
                 *
                 * This saved_p is cleared by the host EOI, when we know
                 * for sure the queue slot is no longer in use.
                 */
                if (pq & 2) {
                        pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
                        xd->saved_p = true;

                        /*
                         * Sync the XIVE source HW to ensure the interrupt
                         * has gone through the EAS before we change its
                         * target to the guest. That should guarantee us
                         * that we *will* eventually get an EOI for it on
                         * the host. Otherwise there would be a small window
                         * for P to be seen here but the interrupt going
                         * to the guest queue.
                         */
                        if (xive_ops->sync_source)
                                xive_ops->sync_source(hw_irq);
                } else
                        xd->saved_p = false;
        } else {
                irqd_clr_forwarded_to_vcpu(d);

                /* No host target ? hard mask and return */
                if (xd->target == XIVE_INVALID_TARGET) {
                        xive_do_source_set_mask(xd, true);
                        return 0;
                }

                /*
                 * Sync the XIVE source HW to ensure the interrupt
                 * has gone through the EAS before we change its
                 * target to the host.
                 */
                if (xive_ops->sync_source)
                        xive_ops->sync_source(hw_irq);

                /*
                 * By convention we are called with the interrupt in
                 * a PQ=10 or PQ=11 state, ie, it won't fire and will
                 * have latched in Q whether there's a pending HW
                 * interrupt or not.
                 *
                 * First reconfigure the target.
                 */
                rc = xive_ops->configure_irq(hw_irq,
                                             get_hard_smp_processor_id(xd->target),
                                             xive_irq_priority, d->irq);
                if (rc)
                        return rc;

                /*
                 * Then if saved_p is not set, effectively re-enable the
                 * interrupt with an EOI which will also restore the old
                 * "pending" state (ie, Q) if it was latched.
                 *
                 * If saved_p is set, the interrupt is pending in one of
                 * the host queues, so we must not hard unmask it now.
                 * It will be EOIed when the host does, at which point
                 * the EOI handler will do the right thing.
                 */
                if (!xd->saved_p)
                        xive_do_source_eoi(hw_irq, xd);

        }
        return 0;
}

static struct irq_chip xive_irq_chip = {
        .name = "XIVE-IRQ",
        .irq_startup = xive_irq_startup,
        .irq_shutdown = xive_irq_shutdown,
        .irq_eoi = xive_irq_eoi,
        .irq_mask = xive_irq_mask,
        .irq_unmask = xive_irq_unmask,
        .irq_set_affinity = xive_irq_set_affinity,
        .irq_set_type = xive_irq_set_type,
        .irq_retrigger = xive_irq_retrigger,
        .irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
};

bool is_xive_irq(struct irq_chip *chip)
{
        return chip == &xive_irq_chip;
}
EXPORT_SYMBOL_GPL(is_xive_irq);

void xive_cleanup_irq_data(struct xive_irq_data *xd)
{
        /* The EOI and trigger pages may share a single mapping */
        if (xd->eoi_mmio) {
                iounmap(xd->eoi_mmio);
                if (xd->eoi_mmio == xd->trig_mmio)
                        xd->trig_mmio = NULL;
                xd->eoi_mmio = NULL;
        }
        if (xd->trig_mmio) {
                iounmap(xd->trig_mmio);
                xd->trig_mmio = NULL;
        }
}
EXPORT_SYMBOL_GPL(xive_cleanup_irq_data);

static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
{
        struct xive_irq_data *xd;
        int rc;

        xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL);
        if (!xd)
                return -ENOMEM;
        rc = xive_ops->populate_irq_data(hw, xd);
        if (rc) {
                kfree(xd);
                return rc;
        }
        xd->target = XIVE_INVALID_TARGET;
        irq_set_handler_data(virq, xd);

        return 0;
}

static void xive_irq_free_data(unsigned int virq)
{
        struct xive_irq_data *xd = irq_get_handler_data(virq);

        if (!xd)
                return;
        irq_set_handler_data(virq, NULL);
        xive_cleanup_irq_data(xd);
        kfree(xd);
}

#ifdef CONFIG_SMP

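/* Cause an IPI on a target CPU by writing to its IPI trigger page */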
static void xive_cause_ipi(int cpu)
{
        struct xive_cpu *xc;
        struct xive_irq_data *xd;

        xc = per_cpu(xive_cpu, cpu);

        DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n",
                    smp_processor_id(), cpu, xc->hw_ipi);

        xd = &xc->ipi_data;
        if (WARN_ON(!xd->trig_mmio))
                return;
        out_be64(xd->trig_mmio, 0);
}

static irqreturn_t xive_muxed_ipi_action(int irq, void *dev_id)
{
        return smp_ipi_demux();
}

static void xive_ipi_eoi(struct irq_data *d)
{
        struct xive_cpu *xc = __this_cpu_read(xive_cpu);

        /* Handle possible race with unplug and drop stale IPIs */
        if (!xc)
                return;

        DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
                    d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);

        xive_do_source_eoi(xc->hw_ipi, &xc->ipi_data);
        xive_do_queue_eoi(xc);
}

static void xive_ipi_do_nothing(struct irq_data *d)
{
        /*
         * Nothing to do, we never mask/unmask IPIs, but the callback
         * has to exist for the irq_chip.
         */
}

static struct irq_chip xive_ipi_chip = {
        .name = "XIVE-IPI",
        .irq_eoi = xive_ipi_eoi,
        .irq_mask = xive_ipi_do_nothing,
        .irq_unmask = xive_ipi_do_nothing,
};

static void __init xive_request_ipi(void)
{
        unsigned int virq;

        /*
         * Initialization failed, move on, we might manage to
         * reach the point where we display our errors before
         * the system falls over.
         */
        if (!xive_irq_domain)
                return;

        /* Initialize it */
        virq = irq_create_mapping(xive_irq_domain, 0);
        xive_ipi_irq = virq;

        WARN_ON(request_irq(virq, xive_muxed_ipi_action,
                            IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
}


static int xive_setup_cpu_ipi(unsigned int cpu)
{
        struct xive_cpu *xc;
        int rc;

        pr_debug("Setting up IPI for CPU %d\n", cpu);

        xc = per_cpu(xive_cpu, cpu);

        /* Check if we are already setup */
        if (xc->hw_ipi != 0)
                return 0;

        /* Grab an IPI from the backend, this will populate xc->hw_ipi */
        if (xive_ops->get_ipi(cpu, xc))
                return -EIO;

        /*
         * Populate the IRQ data in the xive_cpu structure and
         * configure the HW / enable the IPIs.
         */
        rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data);
        if (rc) {
                pr_err("Failed to populate IPI data on CPU %d\n", cpu);
                return -EIO;
        }
        rc = xive_ops->configure_irq(xc->hw_ipi,
                                     get_hard_smp_processor_id(cpu),
                                     xive_irq_priority, xive_ipi_irq);
        if (rc) {
                pr_err("Failed to map IPI CPU %d\n", cpu);
                return -EIO;
        }
        pr_devel("CPU %d HW IPI %x, virq %d, trig_mmio=%p\n", cpu,
                 xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);

        /* Unmask it */
        xive_do_source_set_mask(&xc->ipi_data, false);

        return 0;
}

static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
{
        /* Disable the IPI and free the IRQ data */

        /* Already cleaned up ? */
        if (xc->hw_ipi == 0)
                return;

        /* Mask the IPI */
        xive_do_source_set_mask(&xc->ipi_data, true);

        /*
         * Note: We don't call xive_cleanup_irq_data() to free
         * the mappings as this is called from an IPI on kexec
         * which is not a safe environment to call iounmap()
         */

        /* Deconfigure/mask in the backend */
        xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(),
                                0xff, xive_ipi_irq);

        /* Free the IPIs in the backend */
        xive_ops->put_ipi(cpu, xc);
}

void __init xive_smp_probe(void)
{
        smp_ops->cause_ipi = xive_cause_ipi;

        /* Register the IPI */
        xive_request_ipi();

        /* Allocate and setup IPI for the boot CPU */
        xive_setup_cpu_ipi(smp_processor_id());
}

#endif /* CONFIG_SMP */

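/*
 * The irq domain "map" callback: associate a Linux virq with a HW
 * interrupt number and pick the right irq_chip and flow handler.
 */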
static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
                               irq_hw_number_t hw)
{
        int rc;

        /*
         * Mark interrupts as edge sensitive by default so that resend
         * actually works. Will fix that up below if needed.
         */
        irq_clear_status_flags(virq, IRQ_LEVEL);

#ifdef CONFIG_SMP
        /* IPIs are special and come up with HW number 0 */
        if (hw == 0) {
                /*
                 * IPIs are marked per-cpu. We use separate HW interrupts
                 * under the hood but associated with the same "linux"
                 * interrupt.
                 */
                irq_set_chip_and_handler(virq, &xive_ipi_chip,
                                         handle_percpu_irq);
                return 0;
        }
#endif

        rc = xive_irq_alloc_data(virq, hw);
        if (rc)
                return rc;

        irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq);

        return 0;
}

static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
{
        struct irq_data *data = irq_get_irq_data(virq);
        unsigned int hw_irq;

        /* No data means the interrupt was never set up, nothing to free */
        if (!data)
                return;
        hw_irq = (unsigned int)irqd_to_hwirq(data);
        if (hw_irq)
                xive_irq_free_data(virq);
}

static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct,
                                 const u32 *intspec, unsigned int intsize,
                                 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
        *out_hwirq = intspec[0];

        /*
         * If intsize is at least 2, we look for the type in the second
         * cell; we assume the LSB indicates a level interrupt.
         */
        if (intsize > 1) {
                if (intspec[1] & 1)
                        *out_flags = IRQ_TYPE_LEVEL_LOW;
                else
                        *out_flags = IRQ_TYPE_EDGE_RISING;
        } else
                *out_flags = IRQ_TYPE_LEVEL_LOW;

        return 0;
}

static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node,
                                 enum irq_domain_bus_token bus_token)
{
        return xive_ops->match(node);
}

static const struct irq_domain_ops xive_irq_domain_ops = {
        .match = xive_irq_domain_match,
        .map = xive_irq_domain_map,
        .unmap = xive_irq_domain_unmap,
        .xlate = xive_irq_domain_xlate,
};

static void __init xive_init_host(void)
{
        xive_irq_domain = irq_domain_add_nomap(NULL, XIVE_MAX_IRQ,
                                               &xive_irq_domain_ops, NULL);
        if (WARN_ON(xive_irq_domain == NULL))
                return;
        irq_set_default_host(xive_irq_domain);
}


static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
        if (xc->queue[xive_irq_priority].qpage)
                xive_ops->cleanup_queue(cpu, xc, xive_irq_priority);
}

static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
        int rc = 0;

        /* We set up one queue for now, at the single priority we use */
        if (!xc->queue[xive_irq_priority].qpage)
                rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority);

        return rc;
}

static int xive_prepare_cpu(unsigned int cpu)
{
        struct xive_cpu *xc;

        xc = per_cpu(xive_cpu, cpu);
        if (!xc) {
                struct device_node *np;

                xc = kzalloc_node(sizeof(struct xive_cpu),
                                  GFP_KERNEL, cpu_to_node(cpu));
                if (!xc)
                        return -ENOMEM;
                np = of_get_cpu_node(cpu, NULL);
                if (np)
                        xc->chip_id = of_get_ibm_chip_id(np);
                of_node_put(np);

                per_cpu(xive_cpu, cpu) = xc;
        }

        /* Setup EQs if not already */
        return xive_setup_cpu_queues(cpu, xc);
}

static void xive_setup_cpu(void)
{
        struct xive_cpu *xc = __this_cpu_read(xive_cpu);

        /* The backend might have additional things to do */
        if (xive_ops->setup_cpu)
                xive_ops->setup_cpu(smp_processor_id(), xc);

        /* Set CPPR to 0xff to enable flow of interrupts */
        xc->cppr = 0xff;
        out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}

#ifdef CONFIG_SMP
void xive_smp_setup_cpu(void)
{
        pr_devel("SMP setup CPU %d\n", smp_processor_id());

        /* This will have already been done on the boot CPU */
        if (smp_processor_id() != boot_cpuid)
                xive_setup_cpu();
}

int xive_smp_prepare_cpu(unsigned int cpu)
{
        int rc;

        /* Allocate per-CPU data and queues */
        rc = xive_prepare_cpu(cpu);
        if (rc)
                return rc;

        /* Allocate and setup IPI for the new CPU */
        return xive_setup_cpu_ipi(cpu);
}

#ifdef CONFIG_HOTPLUG_CPU
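/*
 * Flush the queue of a CPU that is going offline, replaying or
 * re-triggering any interrupt found so it reaches its new target
 * CPU rather than being lost.
 */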
static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
{
        u32 irq;

        /* We assume local irqs are disabled */
        WARN_ON(!irqs_disabled());

        /* Check what's already in the CPU queue */
        while ((irq = xive_scan_interrupts(xc, false)) != 0) {
                /*
                 * We need to re-route that interrupt to its new
                 * destination. First get and lock the descriptor.
                 */
                struct irq_desc *desc = irq_to_desc(irq);
                struct irq_data *d = irq_desc_get_irq_data(desc);
                struct xive_irq_data *xd;
                unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

                /*
                 * Ignore anything that isn't a XIVE irq and ignore
                 * IPIs, so we can just be safely called from kexec
                 * and assume we can only get XIVE irqs on real irqs.
                 */
                if (d->domain != xive_irq_domain || hw_irq == 0)
                        continue;

                /*
                 * The IRQ should have already been re-routed, it's just a
                 * stale entry in the old queue, so re-trigger it in order
                 * to make it reach its new destination.
                 */
#ifdef DEBUG_FLUSH
                pr_info("CPU %d: Got irq %d while offline, re-sending...\n",
                        cpu, irq);
#endif
                raw_spin_lock(&desc->lock);
                xd = irq_desc_get_handler_data(desc);

                /*
                 * For LSIs we EOI, which will cause a resend if the
                 * interrupt is still asserted. Otherwise do an MSI
                 * retrigger.
                 */
                if (xd->flags & XIVE_IRQ_FLAG_LSI)
                        xive_do_source_eoi(irqd_to_hwirq(d), xd);
                else
                        xive_irq_retrigger(d);

                raw_spin_unlock(&desc->lock);
        }
}

void xive_smp_disable_cpu(void)
{
        struct xive_cpu *xc = __this_cpu_read(xive_cpu);
        unsigned int cpu = smp_processor_id();

        /* Migrate interrupts away from the CPU */
        irq_migrate_all_off_this_cpu();

        /* Set CPPR to 0 to disable flow of interrupts */
        xc->cppr = 0;
        out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

        /* Flush everything still in the queue */
        xive_flush_cpu_queue(cpu, xc);

        /* Re-enable CPPR */
        xc->cppr = 0xff;
        out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}

void xive_flush_interrupt(void)
{
        struct xive_cpu *xc = __this_cpu_read(xive_cpu);
        unsigned int cpu = smp_processor_id();

        /* Called if an interrupt occurs while the CPU is hot unplugged */
        xive_flush_cpu_queue(cpu, xc);
}

#endif /* CONFIG_HOTPLUG_CPU */

#endif /* CONFIG_SMP */


void xive_teardown_cpu(void)
{
        struct xive_cpu *xc = __this_cpu_read(xive_cpu);
        unsigned int cpu = smp_processor_id();

        /* Set CPPR to 0 to disable flow of interrupts */
        xc->cppr = 0;
        out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

        if (xive_ops->teardown_cpu)
                xive_ops->teardown_cpu(cpu, xc);

#ifdef CONFIG_SMP
        /* Get rid of the IPI */
        xive_cleanup_cpu_ipi(cpu, xc);
#endif

        /* Disable and free the queues */
        xive_cleanup_cpu_queues(cpu, xc);
}

void xive_shutdown(void)
{
        xive_ops->shutdown();
}

bool __init xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
                           u8 max_prio)
{
        xive_tima = area;
        xive_tima_offset = offset;
        xive_ops = ops;
        xive_irq_priority = max_prio;

        ppc_md.get_irq = xive_get_irq;
        __xive_enabled = true;

        pr_devel("Initializing host..\n");
        xive_init_host();

        pr_devel("Initializing boot CPU..\n");

        /* Allocate per-CPU data and queues */
        xive_prepare_cpu(smp_processor_id());

        /* Get ready for interrupts */
        xive_setup_cpu();

        pr_info("Interrupt handling initialized with %s backend\n",
                xive_ops->name);
        pr_info("Using priority %d for all interrupts\n", max_prio);

        return true;
}

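/*
 * Allocate and zero a (possibly multi-page) event queue buffer of
 * 2^queue_shift bytes on the node of the given CPU.
 */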
__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
{
        unsigned int alloc_order;
        struct page *pages;
        __be32 *qpage;

        alloc_order = xive_alloc_order(queue_shift);
        pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
        if (!pages)
                return ERR_PTR(-ENOMEM);
        qpage = (__be32 *)page_address(pages);
        memset(qpage, 0, 1 << queue_shift);

        return qpage;
}

static int __init xive_off(char *arg)
{
        xive_cmdline_disabled = true;
        /* __setup handlers return 1 when the argument is consumed */
        return 1;
}
__setup("xive=off", xive_off);