/*
 * XIVE: eXternal Interrupt Virtualization Engine. Common core code
 * shared by the PowerNV and pseries XIVE backends.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/msi.h>
#include <linux/vmalloc.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/xmon.h>

#include "xive-internal.h"

#undef DEBUG_FLUSH
#undef DEBUG_ALL

#ifdef DEBUG_ALL
#define DBG_VERBOSE(fmt, ...)	pr_devel("cpu %d - " fmt, \
					 smp_processor_id(), ## __VA_ARGS__)
#else
#define DBG_VERBOSE(fmt...)	do { } while (0)
#endif

bool __xive_enabled;
EXPORT_SYMBOL_GPL(__xive_enabled);
bool xive_cmdline_disabled;

/* We use only one priority for now */
static u8 xive_irq_priority;

/* TIMA exported to KVM */
void __iomem *xive_tima;
EXPORT_SYMBOL_GPL(xive_tima);
u32 xive_tima_offset;

/* Backend ops */
static const struct xive_ops *xive_ops;

/* Our "global" interrupt domain */
static struct irq_domain *xive_irq_domain;

#ifdef CONFIG_SMP
/* The IPIs use the same logical irq number when on the same chip */
static struct xive_ipi_desc {
	unsigned int irq;
	char name[16];
	atomic_t started;
} *xive_ipis;

/*
 * Use early_cpu_to_node() for hot-plugged CPUs
 */
static unsigned int xive_ipi_cpu_to_irq(unsigned int cpu)
{
	return xive_ipis[early_cpu_to_node(cpu)].irq;
}
#endif

/* Xive state for each CPU */
static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);

/* An invalid CPU target */
#define XIVE_INVALID_TARGET	(-1)
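
/*
 * Read the next entry in a queue, return its content if it's valid
 * or 0 if there is no new entry.
 *
 * The queue pointer is moved forward unless "just_peek" is set.
 */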
static u32 xive_read_eq(struct xive_q *q, bool just_peek)
{
	u32 cur;

	if (!q->qpage)
		return 0;
	cur = be32_to_cpup(q->qpage + q->idx);

	/* Check valid bit (31) vs current toggle polarity */
	if ((cur >> 31) == q->toggle)
		return 0;

	/* If consuming from the queue ... */
	if (!just_peek) {
		/* Next entry */
		q->idx = (q->idx + 1) & q->msk;

		/* Wrap around: flip valid toggle */
		if (q->idx == 0)
			q->toggle ^= 1;
	}
	/* Mask out the valid bit (31) */
	return cur & 0x7fffffff;
}
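
/*
 * Scans all the queues that may have interrupts in them
 * (based on "pending_prio") in priority order until an
 * interrupt is found or all the queues are empty.
 *
 * Then updates the CPPR (Current Processor Priority
 * Register) based on the most favored interrupt found
 * (0xff if none) and returns what was found (0 if none).
 *
 * If just_peek is set, returns the most favored pending
 * interrupt if any, but doesn't update the queue pointers.
 *
 * Note: This function can operate generically on any number
 * of queues (up to 8). The current implementation of the XIVE
 * driver only uses a single queue however.
 *
 * Note2: This will also "flush" the pending_count of a queue
 * into its count when that queue is observed to be empty.
 * This is used to keep track of the amount of interrupts
 * targetting a queue: when an interrupt is moved away from a
 * queue, we only decrement that queue's count once the queue
 * is observed to be empty, to avoid races with a concurrent
 * scan.
 */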
static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
{
	u32 irq = 0;
	u8 prio = 0;

	/* Find highest pending priority */
	while (xc->pending_prio != 0) {
		struct xive_q *q;

		prio = ffs(xc->pending_prio) - 1;
		DBG_VERBOSE("scan_irq: trying prio %d\n", prio);

		/* Try to fetch from the queue of that priority */
		irq = xive_read_eq(&xc->queue[prio], just_peek);

		/* Found something ? That's it */
		if (irq) {
			if (just_peek || irq_to_desc(irq))
				break;
			/*
			 * We should never get here; if we do then we must
			 * have failed to synchronize the interrupt properly
			 * when shutting it down.
			 */
			pr_crit("xive: got interrupt %d without descriptor, dropping\n",
				irq);
			WARN_ON(1);
			continue;
		}

		/* Clear pending bits */
		xc->pending_prio &= ~(1 << prio);

		/*
		 * Check if the queue count needs adjusting due to
		 * interrupts being moved away. See description of
		 * xive_dec_target_count()
		 */
		q = &xc->queue[prio];
		if (atomic_read(&q->pending_count)) {
			int p = atomic_xchg(&q->pending_count, 0);
			if (p) {
				WARN_ON(p > atomic_read(&q->count));
				atomic_sub(p, &q->count);
			}
		}
	}

	/* If nothing was found, set CPPR to 0xff */
	if (irq == 0)
		prio = 0xff;

	/* Update HW CPPR to match if necessary */
	if (prio != xc->cppr) {
		DBG_VERBOSE("scan_irq: adjusting CPPR to %d\n", prio);
		xc->cppr = prio;
		out_8(xive_tima + xive_tima_offset + TM_CPPR, prio);
	}

	return irq;
}
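
/*
 * This is used to perform the magic loads from an ESB
 * described in xive-regs.h
 */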
static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
{
	u64 val;

	if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
		offset |= XIVE_ESB_LD_ST_MO;

	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
		val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0);
	else
		val = in_be64(xd->eoi_mmio + offset);

	return (u8)val;
}

static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data)
{
	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
		xive_ops->esb_rw(xd->hw_irq, offset, data, 1);
	else
		out_be64(xd->eoi_mmio + offset, data);
}

#ifdef CONFIG_XMON
static notrace void xive_dump_eq(const char *name, struct xive_q *q)
{
	u32 i0, i1, idx;

	if (!q->qpage)
		return;
	idx = q->idx;
	i0 = be32_to_cpup(q->qpage + idx);
	idx = (idx + 1) & q->msk;
	i1 = be32_to_cpup(q->qpage + idx);
	xmon_printf("%s idx=%d T=%d %08x %08x ...", name,
		    q->idx, q->toggle, i0, i1);
}

notrace void xmon_xive_do_dump(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);

	xmon_printf("CPU %d:", cpu);
	if (xc) {
		xmon_printf("pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr);

#ifdef CONFIG_SMP
		{
			u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);

			xmon_printf("IPI=0x%08x PQ=%c%c ", xc->hw_ipi,
				    val & XIVE_ESB_VAL_P ? 'P' : '-',
				    val & XIVE_ESB_VAL_Q ? 'Q' : '-');
		}
#endif
		xive_dump_eq("EQ", &xc->queue[xive_irq_priority]);
	}
	xmon_printf("\n");
}

static struct irq_data *xive_get_irq_data(u32 hw_irq)
{
	unsigned int irq = irq_find_mapping(xive_irq_domain, hw_irq);

	return irq ? irq_get_irq_data(irq) : NULL;
}

int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
{
	int rc;
	u32 target;
	u8 prio;
	u32 lirq;

	rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
	if (rc) {
		xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
		return rc;
	}

	xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
		    hw_irq, target, prio, lirq);

	if (!d)
		d = xive_get_irq_data(hw_irq);

	if (d) {
		struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
		u64 val = xive_esb_read(xd, XIVE_ESB_GET);

		xmon_printf("flags=%c%c%c PQ=%c%c",
			    xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
			    xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
			    xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
			    val & XIVE_ESB_VAL_P ? 'P' : '-',
			    val & XIVE_ESB_VAL_Q ? 'Q' : '-');
	}

	xmon_printf("\n");
	return 0;
}

void xmon_xive_get_irq_all(void)
{
	unsigned int i;
	struct irq_desc *desc;

	for_each_irq_desc(i, desc) {
		struct irq_data *d = irq_domain_get_irq_data(xive_irq_domain, i);

		if (d)
			xmon_xive_get_irq_config(irqd_to_hwirq(d), d);
	}
}

#endif /* CONFIG_XMON */

static unsigned int xive_get_irq(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	u32 irq;

	/*
	 * This can be called either as a result of a HW interrupt or
	 * as a "replay" because EOI decided there was still something
	 * in one of the queues.
	 *
	 * First we perform an ACK cycle in order to update our mask
	 * of pending priorities. This will also have the effect of
	 * updating the CPPR to the most favored pending interrupt.
	 *
	 * In the future, if we have a way to differentiate a first
	 * entry (on HW interrupt) from a replay triggered by EOI,
	 * we could skip this on replays unless soft-mask tells us
	 * that a new HW interrupt occurred.
	 */
	xive_ops->update_pending(xc);

	DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio);

	/* Scan our queue(s) for interrupts */
	irq = xive_scan_interrupts(xc, false);

	DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n",
		    irq, xc->pending_prio);

	/* Return pending interrupt if any */
	if (irq == XIVE_BAD_IRQ)
		return 0;
	return irq;
}
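
/*
 * After EOI'ing an interrupt, we need to re-check the queue
 * to see if another interrupt is pending since multiple
 * interrupts can coalesce into a single notification to the
 * CPU.
 *
 * If we find that there is indeed more in there, we call
 * force_external_irq_replay() to make Linux synthesize an
 * external interrupt on the next call to local_irq_restore().
 */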
static void xive_do_queue_eoi(struct xive_cpu *xc)
{
	if (xive_scan_interrupts(xc, true) != 0) {
		DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio);
		force_external_irq_replay();
	}
}
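
/*
 * EOI an interrupt at the source. There are several methods
 * for this depending on the HW version and source type.
 */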
static void xive_do_source_eoi(struct xive_irq_data *xd)
{
	u8 eoi_val;

	xd->stale_p = false;

	/* If the XIVE supports the new "store EOI" facility, use it */
	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) {
		xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
		return;
	}

	/*
	 * For LSIs, we use the "EOI cycle" special load rather than
	 * PQ bits, as they are automatically re-triggered in HW when
	 * still pending.
	 */
	if (xd->flags & XIVE_IRQ_FLAG_LSI) {
		xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
		return;
	}

	/*
	 * Otherwise, we use the special MMIO that does a clear of
	 * both P and Q and returns the old Q. This allows us to then
	 * do a re-trigger if Q was set, rather than synthesizing an
	 * interrupt in software.
	 */
	eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
	DBG_VERBOSE("eoi_val=%x\n", eoi_val);

	/* Re-trigger if needed */
	if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
		out_be64(xd->trig_mmio, 0);
}
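
/* irq_chip eoi callback, called with irq descriptor lock held */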
static void xive_irq_eoi(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
		    d->irq, irqd_to_hwirq(d), xc->pending_prio);

	/*
	 * EOI the source if it hasn't been disabled and hasn't
	 * been passed-through to a KVM guest.
	 */
	if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d) &&
	    !(xd->flags & XIVE_IRQ_FLAG_NO_EOI))
		xive_do_source_eoi(xd);
	else
		xd->stale_p = true;

	/*
	 * Clear saved_p to indicate that it's no longer occupying
	 * a queue slot on the target queue.
	 */
	xd->saved_p = false;

	/* Check for more pending interrupts */
	xive_do_queue_eoi(xc);
}
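
/*
 * Helper used to mask and unmask an interrupt source.
 */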
static void xive_do_source_set_mask(struct xive_irq_data *xd,
				    bool mask)
{
	u64 val;

	/*
	 * If the interrupt had P set, it may be in a queue.
	 *
	 * We need to make sure we don't re-enable it until it
	 * has been fetched from that queue and EOId. We keep
	 * a copy of that P state in "saved_p" and use it to
	 * restore the ESB to PQ=10 rather than PQ=00 when
	 * re-enabling, so a new trigger only latches in Q
	 * instead of creating a duplicate queue entry.
	 */
	if (mask) {
		val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
		if (!xd->stale_p && !!(val & XIVE_ESB_VAL_P))
			xd->saved_p = true;
		xd->stale_p = false;
	} else if (xd->saved_p) {
		xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
		xd->saved_p = false;
	} else {
		xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
		xd->stale_p = false;
	}
}
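
/*
 * Try to choose "cpu" as a new interrupt target. Increments
 * the queue accounting for that target if it's not already
 * full.
 */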
static bool xive_try_pick_target(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	struct xive_q *q = &xc->queue[xive_irq_priority];
	int max;

	/*
	 * Calculate max number of interrupts in that queue.
	 *
	 * We leave a gap of one entry just in case.
	 */
	max = (q->msk + 1) - 1;
	return !!atomic_add_unless(&q->count, 1, max);
}
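
/*
 * Un-account an interrupt for a target CPU. We don't directly
 * decrement q->count since the interrupt might still be present
 * in the queue.
 *
 * Instead increment a separate counter "pending_count" which
 * will be subtracted from "count" in the scan loop once the
 * queue is observed to be empty.
 */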
static void xive_dec_target_count(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	struct xive_q *q = &xc->queue[xive_irq_priority];

	if (WARN_ON(cpu < 0 || !xc)) {
		pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc);
		return;
	}

	/*
	 * We increment the "pending count" which will be subtracted
	 * from "count" in xive_scan_interrupts() once the queue is
	 * observed to be empty. We cannot simply decrement "count"
	 * here as that would race with a concurrent scan of the
	 * queue.
	 */
	atomic_inc(&q->pending_count);
}
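
/* Find a tentative CPU target in a CPU mask */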
static int xive_find_target_in_mask(const struct cpumask *mask,
				    unsigned int fuzz)
{
	int cpu, first, num, i;

	/* Pick up a starting point CPU in the mask based on fuzz */
	num = min_t(int, cpumask_weight(mask), nr_cpu_ids);
	first = fuzz % num;

	/* Locate it */
	cpu = cpumask_first(mask);
	for (i = 0; i < first && cpu < nr_cpu_ids; i++)
		cpu = cpumask_next(cpu, mask);

	/* Sanity check */
	if (WARN_ON(cpu >= nr_cpu_ids))
		cpu = cpumask_first(cpu_online_mask);

	/* Remember first one to handle wrap-around */
	first = cpu;

	/*
	 * Now go through the entire mask until we find a valid
	 * target.
	 */
	do {
		/*
		 * We re-check online as the fallback case passes us
		 * an untested affinity mask.
		 */
		if (cpu_online(cpu) && xive_try_pick_target(cpu))
			return cpu;
		cpu = cpumask_next(cpu, mask);

		/* Wrap around */
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(mask);
	} while (cpu != first);

	return -1;
}
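
/*
 * Pick a target CPU for an interrupt. This is done at
 * startup or if the affinity is changed in a way that
 * invalidates the current target.
 */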
static int xive_pick_irq_target(struct irq_data *d,
				const struct cpumask *affinity)
{
	static unsigned int fuzz;
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	cpumask_var_t mask;
	int cpu = -1;

	/*
	 * If we have chip IDs, first we try to build a mask of
	 * CPUs matching the source chip and find a target in there.
	 */
	if (xd->src_chip != XIVE_INVALID_CHIP_ID &&
	    zalloc_cpumask_var(&mask, GFP_ATOMIC)) {
		/* Build a mask of matching chip IDs */
		for_each_cpu_and(cpu, affinity, cpu_online_mask) {
			struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
			if (xc->chip_id == xd->src_chip)
				cpumask_set_cpu(cpu, mask);
		}
		/* Try to find a target */
		if (cpumask_empty(mask))
			cpu = -1;
		else
			cpu = xive_find_target_in_mask(mask, fuzz++);
		free_cpumask_var(mask);
		if (cpu >= 0)
			return cpu;
		fuzz--;
	}

	/* No chip IDs, fallback to using the affinity mask */
	return xive_find_target_in_mask(affinity, fuzz++);
}

static unsigned int xive_irq_startup(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int target, rc;

	xd->saved_p = false;
	xd->stale_p = false;
	pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n",
		 d->irq, hw_irq, d);

	/* Pick a target */
	target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d));
	if (target == XIVE_INVALID_TARGET) {
		/* Try again breaking affinity */
		target = xive_pick_irq_target(d, cpu_online_mask);
		if (target == XIVE_INVALID_TARGET)
			return -ENXIO;
		pr_warn("irq %d started with broken affinity\n", d->irq);
	}

	/* Sanity check */
	if (WARN_ON(target == XIVE_INVALID_TARGET ||
		    target >= nr_cpu_ids))
		target = smp_processor_id();

	xd->target = target;

	/*
	 * Configure the logical number to be the Linux IRQ number
	 * and route the interrupt to the chosen target at our one
	 * and only priority.
	 */
	rc = xive_ops->configure_irq(hw_irq,
				     get_hard_smp_processor_id(target),
				     xive_irq_priority, d->irq);
	if (rc)
		return rc;

	/* Unmask the ESB */
	xive_do_source_set_mask(xd, false);

	return 0;
}

/* called with irq descriptor lock held */
static void xive_irq_shutdown(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

	pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n",
		 d->irq, hw_irq, d);

	if (WARN_ON(xd->target == XIVE_INVALID_TARGET))
		return;

	/* Mask the interrupt at the source */
	xive_do_source_set_mask(xd, true);

	/*
	 * Mask the interrupt in HW in the IVT/EAS and set the number
	 * to be the "bad" IRQ number.
	 */
	xive_ops->configure_irq(hw_irq,
				get_hard_smp_processor_id(xd->target),
				0xff, XIVE_BAD_IRQ);

	xive_dec_target_count(xd->target);
	xd->target = XIVE_INVALID_TARGET;
}

static void xive_irq_unmask(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd);

	xive_do_source_set_mask(xd, false);
}

static void xive_irq_mask(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd);

	xive_do_source_set_mask(xd, true);
}

static int xive_irq_set_affinity(struct irq_data *d,
				 const struct cpumask *cpumask,
				 bool force)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	u32 target, old_target;
	int rc = 0;

	pr_debug("%s: irq %d/%x\n", __func__, d->irq, hw_irq);

	/* Is this valid ? */
	if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
		return -EINVAL;

	/*
	 * If the existing target is already in the new mask, and is
	 * online then do nothing.
	 */
	if (xd->target != XIVE_INVALID_TARGET &&
	    cpu_online(xd->target) &&
	    cpumask_test_cpu(xd->target, cpumask))
		return IRQ_SET_MASK_OK;

	/* Pick a new target */
	target = xive_pick_irq_target(d, cpumask);

	/* No target found */
	if (target == XIVE_INVALID_TARGET)
		return -ENXIO;

	/* Sanity check */
	if (WARN_ON(target >= nr_cpu_ids))
		target = smp_processor_id();

	old_target = xd->target;

	/*
	 * Only configure the irq if it's not currently passed-through to
	 * a KVM guest.
	 */
	if (!irqd_is_forwarded_to_vcpu(d))
		rc = xive_ops->configure_irq(hw_irq,
					     get_hard_smp_processor_id(target),
					     xive_irq_priority, d->irq);
	if (rc < 0) {
		pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
		return rc;
	}

	pr_debug(" target: 0x%x\n", target);
	xd->target = target;

	/* Give up previous target */
	if (old_target != XIVE_INVALID_TARGET)
		xive_dec_target_count(old_target);

	return IRQ_SET_MASK_OK;
}

static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/*
	 * We only support these. This has really no effect other than
	 * setting the corresponding descriptor bits, but those will in
	 * turn affect the resend function when re-enabling an edge
	 * interrupt.
	 *
	 * Set the default to edge as explained in map().
	 */
	if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_EDGE_RISING;

	if (flow_type != IRQ_TYPE_EDGE_RISING &&
	    flow_type != IRQ_TYPE_LEVEL_LOW)
		return -EINVAL;

	irqd_set_trigger_type(d, flow_type);

	/*
	 * Double check it matches what the FW thinks.
	 *
	 * NOTE: We don't know yet if the PAPR interface will provide
	 * the LSI vs MSI information apart from the device-tree so
	 * this check might have to move into an optional backend call.
	 */
	if ((flow_type == IRQ_TYPE_LEVEL_LOW) !=
	    !!(xd->flags & XIVE_IRQ_FLAG_LSI)) {
		pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n",
			d->irq, (u32)irqd_to_hwirq(d),
			(flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge",
			(xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge");
	}

	return IRQ_SET_MASK_OK_NOCOPY;
}

static int xive_irq_retrigger(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/* This should be only for MSIs */
	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
		return 0;

	/*
	 * To perform a retrigger, we first set the PQ bits to
	 * 11, then perform an EOI.
	 */
	xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
	xive_do_source_eoi(xd);

	return 1;
}
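
/*
 * Caller holds the irq descriptor lock, so this won't be called
 * concurrently with xive_get_irqchip_state on the same interrupt.
 */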
static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int rc;
	u8 pq;

	/*
	 * This is called by KVM to pass an interrupt to a guest (state
	 * != NULL) or to grab it back for the host (state == NULL).
	 */
	if (state) {
		irqd_set_forwarded_to_vcpu(d);

		/* Set it to PQ=10 state to prevent further sends */
		pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
		if (!xd->stale_p) {
			xd->saved_p = !!(pq & XIVE_ESB_VAL_P);
			xd->stale_p = !xd->saved_p;
		}

		/* No target ? nothing to do */
		if (xd->target == XIVE_INVALID_TARGET) {
			/*
			 * An untargetted interrupt should have been
			 * also masked at the source.
			 */
			WARN_ON(xd->saved_p);

			return 0;
		}

		/*
		 * If P was set, adjust the state to PQ=11 to indicate
		 * that a resend is needed for the interrupt to reach
		 * the guest. Also remember the value of P.
		 *
		 * This also tells us that it's in flight to a host queue
		 * or has already been fetched but hasn't been EOIed yet
		 * by the host, thus it's potentially using up a host
		 * queue slot. This is important to know because as long
		 * as this is the case, we must not hard-unmask it when
		 * "returning" that interrupt to the host.
		 *
		 * This saved_p is cleared by the host EOI, when we know
		 * for sure the queue slot is no longer in use.
		 */
		if (xd->saved_p) {
			xive_esb_read(xd, XIVE_ESB_SET_PQ_11);

			/*
			 * Sync the XIVE source HW to ensure the interrupt
			 * has gone through the EAS before we change its
			 * target to the guest. That should guarantee us
			 * that we *will* eventually get an EOI for it on
			 * the host. Otherwise there would be a small window
			 * for P to be seen here but the interrupt going
			 * to the guest queue.
			 */
			if (xive_ops->sync_source)
				xive_ops->sync_source(hw_irq);
		}
	} else {
		irqd_clr_forwarded_to_vcpu(d);

		/* No host target ? just mask and return */
		if (xd->target == XIVE_INVALID_TARGET) {
			xive_do_source_set_mask(xd, true);
			return 0;
		}

		/*
		 * Sync the XIVE source HW to ensure the interrupt
		 * has gone through the EAS before we change its
		 * target to the host.
		 */
		if (xive_ops->sync_source)
			xive_ops->sync_source(hw_irq);

		/*
		 * By convention we are called with the interrupt in
		 * a PQ=10 or PQ=11 state, ie, it won't fire and will
		 * have latched in Q whether there's a pending HW
		 * interrupt or not.
		 *
		 * First reconfigure the target.
		 */
		rc = xive_ops->configure_irq(hw_irq,
					     get_hard_smp_processor_id(xd->target),
					     xive_irq_priority, d->irq);
		if (rc)
			return rc;

		/*
		 * Then if saved_p is not set, effectively re-enable the
		 * interrupt with an EOI. If it is set, we know there is
		 * still a message in a host queue somewhere that will be
		 * EOId eventually.
		 *
		 * Note: We don't check irqd_irq_disabled(). Effectively,
		 * we *will* let the irq get through even if masked if the
		 * HW is still firing it, in order to deal with the whole
		 * saved_p business properly. If the interrupt triggers
		 * while masked, the generic code will re-mask it anyway.
		 */
		if (!xd->saved_p)
			xive_do_source_eoi(xd);

	}
	return 0;
}
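
/* irq_chip irq_get_irqchip_state callback: only IRQCHIP_STATE_ACTIVE is supported */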
static int xive_get_irqchip_state(struct irq_data *data,
				  enum irqchip_irq_state which, bool *state)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(data);
	u8 pq;

	switch (which) {
	case IRQCHIP_STATE_ACTIVE:
		pq = xive_esb_read(xd, XIVE_ESB_GET);

		/*
		 * The esb value being all 1's means we couldn't get
		 * the PQ state of the interrupt through mmio. It may
		 * happen, for example when querying a PHB interrupt
		 * while the PHB is in an error state. We consider the
		 * interrupt to be inactive in that case.
		 */
		*state = (pq != XIVE_ESB_INVALID) && !xd->stale_p &&
			 (xd->saved_p || (!!(pq & XIVE_ESB_VAL_P) &&
			  !irqd_irq_disabled(data)));
		return 0;
	default:
		return -EINVAL;
	}
}

static struct irq_chip xive_irq_chip = {
	.name = "XIVE-IRQ",
	.irq_startup = xive_irq_startup,
	.irq_shutdown = xive_irq_shutdown,
	.irq_eoi = xive_irq_eoi,
	.irq_mask = xive_irq_mask,
	.irq_unmask = xive_irq_unmask,
	.irq_set_affinity = xive_irq_set_affinity,
	.irq_set_type = xive_irq_set_type,
	.irq_retrigger = xive_irq_retrigger,
	.irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
	.irq_get_irqchip_state = xive_get_irqchip_state,
};

bool is_xive_irq(struct irq_chip *chip)
{
	return chip == &xive_irq_chip;
}
EXPORT_SYMBOL_GPL(is_xive_irq);

void xive_cleanup_irq_data(struct xive_irq_data *xd)
{
	pr_debug("%s for HW %x\n", __func__, xd->hw_irq);

	if (xd->eoi_mmio) {
		iounmap(xd->eoi_mmio);
		if (xd->eoi_mmio == xd->trig_mmio)
			xd->trig_mmio = NULL;
		xd->eoi_mmio = NULL;
	}
	if (xd->trig_mmio) {
		iounmap(xd->trig_mmio);
		xd->trig_mmio = NULL;
	}
}
EXPORT_SYMBOL_GPL(xive_cleanup_irq_data);

static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
{
	struct xive_irq_data *xd;
	int rc;

	xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL);
	if (!xd)
		return -ENOMEM;
	rc = xive_ops->populate_irq_data(hw, xd);
	if (rc) {
		kfree(xd);
		return rc;
	}
	xd->target = XIVE_INVALID_TARGET;
	irq_set_handler_data(virq, xd);

	/*
	 * Turn OFF by default the interrupt being mapped. A side
	 * effect of this check is the mapping of the ESB page of the
	 * interrupt in the Linux address space. This prevents page
	 * fault issues in the crash handler which masks all
	 * interrupts.
	 */
	xive_esb_read(xd, XIVE_ESB_SET_PQ_01);

	return 0;
}

void xive_irq_free_data(unsigned int virq)
{
	struct xive_irq_data *xd = irq_get_handler_data(virq);

	if (!xd)
		return;
	irq_set_handler_data(virq, NULL);
	xive_cleanup_irq_data(xd);
	kfree(xd);
}
EXPORT_SYMBOL_GPL(xive_irq_free_data);

#ifdef CONFIG_SMP
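
/* Trigger an IPI by writing to the trigger page of the target CPU's IPI source */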
static void xive_cause_ipi(int cpu)
{
	struct xive_cpu *xc;
	struct xive_irq_data *xd;

	xc = per_cpu(xive_cpu, cpu);

	DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n",
		    smp_processor_id(), cpu, xc->hw_ipi);

	xd = &xc->ipi_data;
	if (WARN_ON(!xd->trig_mmio))
		return;
	out_be64(xd->trig_mmio, 0);
}

static irqreturn_t xive_muxed_ipi_action(int irq, void *dev_id)
{
	return smp_ipi_demux();
}

static void xive_ipi_eoi(struct irq_data *d)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	/* Handle possible race with unplug and drop stale IPIs */
	if (!xc)
		return;

	DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
		    d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);

	xive_do_source_eoi(&xc->ipi_data);
	xive_do_queue_eoi(xc);
}

static void xive_ipi_do_nothing(struct irq_data *d)
{
	/*
	 * Nothing to do, we never mask/unmask IPIs, but the callback
	 * has to exist for the struct irq_chip.
	 */
}

static struct irq_chip xive_ipi_chip = {
	.name = "XIVE-IPI",
	.irq_eoi = xive_ipi_eoi,
	.irq_mask = xive_ipi_do_nothing,
	.irq_unmask = xive_ipi_do_nothing,
};
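
/*
 * IPIs are marked per-cpu. We use separate HW interrupts under the
 * hood but associated with the same "linux" interrupt.
 */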
struct xive_ipi_alloc_info {
	irq_hw_number_t hwirq;
};

static int xive_ipi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				     unsigned int nr_irqs, void *arg)
{
	struct xive_ipi_alloc_info *info = arg;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, info->hwirq + i, &xive_ipi_chip,
				    domain->host_data, handle_percpu_irq,
				    NULL, NULL);
	}
	return 0;
}

static const struct irq_domain_ops xive_ipi_irq_domain_ops = {
	.alloc = xive_ipi_irq_domain_alloc,
};

static int __init xive_init_ipis(void)
{
	struct fwnode_handle *fwnode;
	struct irq_domain *ipi_domain;
	unsigned int node;
	int ret = -ENOMEM;

	fwnode = irq_domain_alloc_named_fwnode("XIVE-IPI");
	if (!fwnode)
		goto out;

	ipi_domain = irq_domain_create_linear(fwnode, nr_node_ids,
					      &xive_ipi_irq_domain_ops, NULL);
	if (!ipi_domain)
		goto out_free_fwnode;

	xive_ipis = kcalloc(nr_node_ids, sizeof(*xive_ipis), GFP_KERNEL | __GFP_NOFAIL);
	if (!xive_ipis)
		goto out_free_domain;

	for_each_node(node) {
		struct xive_ipi_desc *xid = &xive_ipis[node];
		struct xive_ipi_alloc_info info = { node };

		/*
		 * Map one IPI interrupt per node for all cpus of that node.
		 * Since the HW interrupt number doesn't have any meaning,
		 * simply use the node number.
		 */
		ret = irq_domain_alloc_irqs(ipi_domain, 1, node, &info);
		if (ret < 0)
			goto out_free_xive_ipis;
		xid->irq = ret;

		snprintf(xid->name, sizeof(xid->name), "IPI-%d", node);
	}

	return ret;

out_free_xive_ipis:
	kfree(xive_ipis);
out_free_domain:
	irq_domain_remove(ipi_domain);
out_free_fwnode:
	irq_domain_free_fwnode(fwnode);
out:
	return ret;
}

static int xive_request_ipi(unsigned int cpu)
{
	struct xive_ipi_desc *xid = &xive_ipis[early_cpu_to_node(cpu)];
	int ret;

	if (atomic_inc_return(&xid->started) > 1)
		return 0;

	ret = request_irq(xid->irq, xive_muxed_ipi_action,
			  IRQF_NO_DEBUG | IRQF_PERCPU | IRQF_NO_THREAD,
			  xid->name, NULL);

	WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
	return ret;
}

static int xive_setup_cpu_ipi(unsigned int cpu)
{
	unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);
	struct xive_cpu *xc;
	int rc;

	pr_debug("Setting up IPI for CPU %d\n", cpu);

	xc = per_cpu(xive_cpu, cpu);

	/* Check if we are already setup */
	if (xc->hw_ipi != XIVE_BAD_IRQ)
		return 0;

	/* Register the IPI */
	xive_request_ipi(cpu);

	/* Grab an IPI from the backend, this will populate xc->hw_ipi */
	if (xive_ops->get_ipi(cpu, xc))
		return -EIO;

	/*
	 * Populate the IRQ data in the xive_cpu structure and
	 * configure the HW / enable the IPIs.
	 */
	rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data);
	if (rc) {
		pr_err("Failed to populate IPI data on CPU %d\n", cpu);
		return -EIO;
	}
	rc = xive_ops->configure_irq(xc->hw_ipi,
				     get_hard_smp_processor_id(cpu),
				     xive_irq_priority, xive_ipi_irq);
	if (rc) {
		pr_err("Failed to map IPI CPU %d\n", cpu);
		return -EIO;
	}
	pr_devel("CPU %d HW IPI %x, virq %d, trig_mmio=%p\n", cpu,
		 xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);

	/* Unmask it */
	xive_do_source_set_mask(&xc->ipi_data, false);

	return 0;
}

static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);

	/* Disable the IPI and free the IRQ data */

	/* Already cleaned up ? */
	if (xc->hw_ipi == XIVE_BAD_IRQ)
		return;

	/* Mask the IPI */
	xive_do_source_set_mask(&xc->ipi_data, true);

	/*
	 * Note: We don't call xive_cleanup_irq_data() to free
	 * the mappings as this is called from an IPI on kexec
	 * which is not a safe environment to call iounmap()
	 */

	/* Deconfigure/mask in the backend */
	xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(),
				0xff, xive_ipi_irq);

	/* Free the IPIs in the backend */
	xive_ops->put_ipi(cpu, xc);
}

void __init xive_smp_probe(void)
{
	smp_ops->cause_ipi = xive_cause_ipi;

	/* Register the IPI */
	xive_init_ipis();

	/* Allocate and setup IPI for the boot CPU */
	xive_setup_cpu_ipi(smp_processor_id());
}

#endif /* CONFIG_SMP */

static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
			       irq_hw_number_t hw)
{
	int rc;

	/*
	 * Mark interrupts as edge sensitive by default so that resend
	 * actually works. Will fix that up below if needed.
	 */
	irq_clear_status_flags(virq, IRQ_LEVEL);

	rc = xive_irq_alloc_data(virq, hw);
	if (rc)
		return rc;

	irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq);

	return 0;
}

static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
{
	xive_irq_free_data(virq);
}

static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct,
				 const u32 *intspec, unsigned int intsize,
				 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	*out_hwirq = intspec[0];

	/*
	 * If intsize is at least 2, we look for the type in the second cell,
	 * we assume the LSB indicates a level interrupt.
	 */
	if (intsize > 1) {
		if (intspec[1] & 1)
			*out_flags = IRQ_TYPE_LEVEL_LOW;
		else
			*out_flags = IRQ_TYPE_EDGE_RISING;
	} else
		*out_flags = IRQ_TYPE_LEVEL_LOW;

	return 0;
}

static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node,
				 enum irq_domain_bus_token bus_token)
{
	return xive_ops->match(node);
}

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static const char * const esb_names[] = { "RESET", "OFF", "PENDING", "QUEUED" };

static const struct {
	u64 mask;
	char *name;
} xive_irq_flags[] = {
	{ XIVE_IRQ_FLAG_STORE_EOI, "STORE_EOI" },
	{ XIVE_IRQ_FLAG_LSI,       "LSI" },
	{ XIVE_IRQ_FLAG_H_INT_ESB, "H_INT_ESB" },
	{ XIVE_IRQ_FLAG_NO_EOI,    "NO_EOI" },
};

static void xive_irq_domain_debug_show(struct seq_file *m, struct irq_domain *d,
				       struct irq_data *irqd, int ind)
{
	struct xive_irq_data *xd;
	u64 val;
	int i;

	/* No IRQ domain level information. To be done */
	if (!irqd)
		return;

	if (!is_xive_irq(irq_data_get_irq_chip(irqd)))
		return;

	seq_printf(m, "%*sXIVE:\n", ind, "");
	ind++;

	xd = irq_data_get_irq_handler_data(irqd);
	if (!xd) {
		seq_printf(m, "%*snot assigned\n", ind, "");
		return;
	}

	val = xive_esb_read(xd, XIVE_ESB_GET);
	seq_printf(m, "%*sESB: %s\n", ind, "", esb_names[val & 0x3]);
	seq_printf(m, "%*sPstate: %s %s\n", ind, "", xd->stale_p ? "stale" : "",
		   xd->saved_p ? "saved" : "");
	seq_printf(m, "%*sTarget: %d\n", ind, "", xd->target);
	seq_printf(m, "%*sChip: %d\n", ind, "", xd->src_chip);
	seq_printf(m, "%*sTrigger: 0x%016llx\n", ind, "", xd->trig_page);
	seq_printf(m, "%*sEOI: 0x%016llx\n", ind, "", xd->eoi_page);
	seq_printf(m, "%*sFlags: 0x%llx\n", ind, "", xd->flags);
	for (i = 0; i < ARRAY_SIZE(xive_irq_flags); i++) {
		if (xd->flags & xive_irq_flags[i].mask)
			seq_printf(m, "%*s%s\n", ind + 12, "", xive_irq_flags[i].name);
	}
}
#endif /* CONFIG_GENERIC_IRQ_DEBUGFS */

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
static int xive_irq_domain_translate(struct irq_domain *d,
				     struct irq_fwspec *fwspec,
				     unsigned long *hwirq,
				     unsigned int *type)
{
	return xive_irq_domain_xlate(d, to_of_node(fwspec->fwnode),
				     fwspec->param, fwspec->param_count,
				     hwirq, type);
}

static int xive_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	int i, rc;

	rc = xive_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (rc)
		return rc;

	pr_debug("%s %d/%lx #%d\n", __func__, virq, hwirq, nr_irqs);

	for (i = 0; i < nr_irqs; i++) {
		/*
		 * Mark interrupts as edge sensitive by default so that resend
		 * actually works. Will fix that up below if needed.
		 */
		irq_clear_status_flags(virq, IRQ_LEVEL);

		/* allocates and sets handler data */
		rc = xive_irq_alloc_data(virq + i, hwirq + i);
		if (rc)
			return rc;

		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &xive_irq_chip, domain->host_data);
		irq_set_handler(virq + i, handle_fasteoi_irq);
	}

	return 0;
}

static void xive_irq_domain_free(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	int i;

	pr_debug("%s %d #%d\n", __func__, virq, nr_irqs);

	for (i = 0; i < nr_irqs; i++)
		xive_irq_free_data(virq + i);
}
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */

static const struct irq_domain_ops xive_irq_domain_ops = {
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	.alloc = xive_irq_domain_alloc,
	.free = xive_irq_domain_free,
	.translate = xive_irq_domain_translate,
#endif
	.match = xive_irq_domain_match,
	.map = xive_irq_domain_map,
	.unmap = xive_irq_domain_unmap,
	.xlate = xive_irq_domain_xlate,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
	.debug_show = xive_irq_domain_debug_show,
#endif
};

static void __init xive_init_host(struct device_node *np)
{
	xive_irq_domain = irq_domain_add_nomap(np, XIVE_MAX_IRQ,
					       &xive_irq_domain_ops, NULL);
	if (WARN_ON(xive_irq_domain == NULL))
		return;
	irq_set_default_host(xive_irq_domain);
}

static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
	if (xc->queue[xive_irq_priority].qpage)
		xive_ops->cleanup_queue(cpu, xc, xive_irq_priority);
}

static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
	int rc = 0;

	/* We setup 1 queue for now, at the single priority we use */
	if (!xc->queue[xive_irq_priority].qpage)
		rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority);

	return rc;
}

static int xive_prepare_cpu(unsigned int cpu)
{
	struct xive_cpu *xc;

	xc = per_cpu(xive_cpu, cpu);
	if (!xc) {
		xc = kzalloc_node(sizeof(struct xive_cpu),
				  GFP_KERNEL, cpu_to_node(cpu));
		if (!xc)
			return -ENOMEM;
		xc->hw_ipi = XIVE_BAD_IRQ;
		xc->chip_id = XIVE_INVALID_CHIP_ID;
		if (xive_ops->prepare_cpu)
			xive_ops->prepare_cpu(cpu, xc);

		per_cpu(xive_cpu, cpu) = xc;
	}

	/* Setup EQs if not already */
	return xive_setup_cpu_queues(cpu, xc);
}

static void xive_setup_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	/* The backend might have additional things to do */
	if (xive_ops->setup_cpu)
		xive_ops->setup_cpu(smp_processor_id(), xc);

	/* Set CPPR to 0xff to enable flow of interrupts */
	xc->cppr = 0xff;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}

#ifdef CONFIG_SMP
void xive_smp_setup_cpu(void)
{
	pr_devel("SMP setup CPU %d\n", smp_processor_id());

	/* This will have already been done on the boot CPU */
	if (smp_processor_id() != boot_cpuid)
		xive_setup_cpu();
}

int xive_smp_prepare_cpu(unsigned int cpu)
{
	int rc;

	/* Allocate per-CPU data and queues */
	rc = xive_prepare_cpu(cpu);
	if (rc)
		return rc;

	/* Allocate and setup IPI for the new CPU */
	return xive_setup_cpu_ipi(cpu);
}

#ifdef CONFIG_HOTPLUG_CPU
static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
{
	u32 irq;

	/* We assume local irqs are disabled */
	WARN_ON(!irqs_disabled());

	/* Check what's already in the CPU queue */
	while ((irq = xive_scan_interrupts(xc, false)) != 0) {
		/*
		 * We need to re-route that interrupt to its new destination.
		 * First get and lock the descriptor.
		 */
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *d = irq_desc_get_irq_data(desc);
		struct xive_irq_data *xd;

		/*
		 * Ignore anything that isn't a XIVE irq, including the
		 * IPIs; those can simply be dropped.
		 */
		if (d->domain != xive_irq_domain)
			continue;

		/*
		 * The IRQ should have already been re-routed, it's just a
		 * stale entry in the old queue, so re-trigger it to reach
		 * its new destination.
		 */
#ifdef DEBUG_FLUSH
		pr_info("CPU %d: Got irq %d while offline, re-sending...\n",
			cpu, irq);
#endif
		raw_spin_lock(&desc->lock);
		xd = irq_desc_get_handler_data(desc);

		/*
		 * Clear saved_p to indicate that it's no longer pending
		 */
		xd->saved_p = false;

		/*
		 * For LSIs, we EOI, this will cause a resend if it's
		 * still asserted. Otherwise do an MSI retrigger.
		 */
		if (xd->flags & XIVE_IRQ_FLAG_LSI)
			xive_do_source_eoi(xd);
		else
			xive_irq_retrigger(d);

		raw_spin_unlock(&desc->lock);
	}
}

void xive_smp_disable_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Migrate interrupts away from this CPU */
	irq_migrate_all_off_this_cpu();

	/* Set CPPR to 0 to disable flow of interrupts */
	xc->cppr = 0;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

	/* Flush everything still in the queue */
	xive_flush_cpu_queue(cpu, xc);

	/* Re-enable CPPR */
	xc->cppr = 0xff;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}

void xive_flush_interrupt(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Called if an interrupt occurred while the CPU was hot unplugged */
	xive_flush_cpu_queue(cpu, xc);
}

#endif /* CONFIG_HOTPLUG_CPU */

#endif /* CONFIG_SMP */

void xive_teardown_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Set CPPR to 0 to disable flow of interrupts */
	xc->cppr = 0;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

	if (xive_ops->teardown_cpu)
		xive_ops->teardown_cpu(cpu, xc);

#ifdef CONFIG_SMP
	/* Get rid of IPI */
	xive_cleanup_cpu_ipi(cpu, xc);
#endif

	/* Disable and free the queues */
	xive_cleanup_cpu_queues(cpu, xc);
}

void xive_shutdown(void)
{
	xive_ops->shutdown();
}

bool __init xive_core_init(struct device_node *np, const struct xive_ops *ops,
			   void __iomem *area, u32 offset, u8 max_prio)
{
	xive_tima = area;
	xive_tima_offset = offset;
	xive_ops = ops;
	xive_irq_priority = max_prio;

	ppc_md.get_irq = xive_get_irq;
	__xive_enabled = true;

	pr_devel("Initializing host..\n");
	xive_init_host(np);

	pr_devel("Initializing boot CPU..\n");

	/* Allocate per-CPU data and queues */
	xive_prepare_cpu(smp_processor_id());

	/* Get ready for interrupts */
	xive_setup_cpu();

	pr_info("Interrupt handling initialized with %s backend\n",
		xive_ops->name);
	pr_info("Using priority %d for all interrupts\n", max_prio);

	return true;
}

__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
{
	unsigned int alloc_order;
	struct page *pages;
	__be32 *qpage;

	alloc_order = xive_alloc_order(queue_shift);
	pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
	if (!pages)
		return ERR_PTR(-ENOMEM);
	qpage = (__be32 *)page_address(pages);
	memset(qpage, 0, 1 << queue_shift);

	return qpage;
}

static int __init xive_off(char *arg)
{
	xive_cmdline_disabled = true;
	return 0;
}
__setup("xive=off", xive_off);

static void xive_debug_show_cpu(struct seq_file *m, int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);

	seq_printf(m, "CPU %d:", cpu);
	if (xc) {
		seq_printf(m, "pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr);

#ifdef CONFIG_SMP
		{
			u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);

			seq_printf(m, "IPI=0x%08x PQ=%c%c ", xc->hw_ipi,
				   val & XIVE_ESB_VAL_P ? 'P' : '-',
				   val & XIVE_ESB_VAL_Q ? 'Q' : '-');
		}
#endif
		{
			struct xive_q *q = &xc->queue[xive_irq_priority];
			u32 i0, i1, idx;

			if (q->qpage) {
				idx = q->idx;
				i0 = be32_to_cpup(q->qpage + idx);
				idx = (idx + 1) & q->msk;
				i1 = be32_to_cpup(q->qpage + idx);
				seq_printf(m, "EQ idx=%d T=%d %08x %08x ...",
					   q->idx, q->toggle, i0, i1);
			}
		}
	}
	seq_puts(m, "\n");
}

static void xive_debug_show_irq(struct seq_file *m, struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int rc;
	u32 target;
	u8 prio;
	u32 lirq;
	struct xive_irq_data *xd;
	u64 val;

	rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
	if (rc) {
		seq_printf(m, "IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
		return;
	}

	seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
		   hw_irq, target, prio, lirq);

	xd = irq_data_get_irq_handler_data(d);
	val = xive_esb_read(xd, XIVE_ESB_GET);
	seq_printf(m, "flags=%c%c%c PQ=%c%c",
		   xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
		   xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
		   xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
		   val & XIVE_ESB_VAL_P ? 'P' : '-',
		   val & XIVE_ESB_VAL_Q ? 'Q' : '-');
	seq_puts(m, "\n");
}

static int xive_core_debug_show(struct seq_file *m, void *private)
{
	unsigned int i;
	struct irq_desc *desc;
	int cpu;

	if (xive_ops->debug_show)
		xive_ops->debug_show(m, private);

	for_each_possible_cpu(cpu)
		xive_debug_show_cpu(m, cpu);

	for_each_irq_desc(i, desc) {
		struct irq_data *d = irq_domain_get_irq_data(xive_irq_domain, i);

		if (d)
			xive_debug_show_irq(m, d);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(xive_core_debug);

int xive_core_debug_init(void)
{
	if (xive_enabled())
		debugfs_create_file("xive", 0400, arch_debugfs_dir,
				    NULL, &xive_core_debug_fops);
	return 0;
}