// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/msi.h>
#include <linux/vmalloc.h>

#include <asm/io.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/xmon.h>

#include "xive-internal.h"

#undef DEBUG_FLUSH
#undef DEBUG_ALL

#ifdef DEBUG_ALL
#define DBG_VERBOSE(fmt, ...)	pr_devel("cpu %d - " fmt, \
					 smp_processor_id(), ## __VA_ARGS__)
#else
#define DBG_VERBOSE(fmt...)	do { } while (0)
#endif

bool __xive_enabled;
EXPORT_SYMBOL_GPL(__xive_enabled);
bool xive_cmdline_disabled;

/* We use only one priority for now */
static u8 xive_irq_priority;

/* TIMA exported to KVM */
void __iomem *xive_tima;
EXPORT_SYMBOL_GPL(xive_tima);
u32 xive_tima_offset;

/* Backend ops */
static const struct xive_ops *xive_ops;

/* Our global interrupt domain */
static struct irq_domain *xive_irq_domain;

#ifdef CONFIG_SMP
/* The IPIs use the same logical irq number when on the same chip */
static struct xive_ipi_desc {
	unsigned int irq;
	char name[16];
	atomic_t started;
} *xive_ipis;

/*
 * Use early_cpu_to_node() for hot-plugged CPUs
 */
static unsigned int xive_ipi_cpu_to_irq(unsigned int cpu)
{
	return xive_ipis[early_cpu_to_node(cpu)].irq;
}
#endif /* CONFIG_SMP */

/* Xive state for each CPU */
static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);

/* An invalid CPU target */
#define XIVE_INVALID_TARGET	(-1)

/* Global toggle for the StoreEOI facility */
static bool xive_store_eoi = true;

static bool xive_is_store_eoi(struct xive_irq_data *xd)
{
	return xd->flags & XIVE_IRQ_FLAG_STORE_EOI && xive_store_eoi;
}

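/*
 * Read the next entry in a queue, return its content if it's valid
 * or 0 if there is no new entry.
 *
 * The queue pointer is moved forward unless "just_peek" is set.
 *
 * Each 32-bit queue entry carries the generation in its top bit:
 * an entry is valid when that bit differs from q->toggle. For
 * example, while q->toggle is 0, valid entries have bit 31 set;
 * once the index wraps back to 0, q->toggle flips to 1 and newly
 * written entries are valid with bit 31 clear.
 */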
static u32 xive_read_eq(struct xive_q *q, bool just_peek)
{
	u32 cur;

	if (!q->qpage)
		return 0;
	cur = be32_to_cpup(q->qpage + q->idx);

	/* Check valid bit (31) vs current toggle polarity */
	if ((cur >> 31) == q->toggle)
		return 0;

	/* If consuming from the queue ... */
	if (!just_peek) {
		/* Next entry */
		q->idx = (q->idx + 1) & q->msk;

		/* Wrap around: flip valid toggle */
		if (q->idx == 0)
			q->toggle ^= 1;
	}

	/* Mask out the valid bit (31) */
	return cur & 0x7fffffff;
}
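/*
 * Scans all the queues that may have interrupts in them
 * (based on "pending_prio") in priority order until an
 * interrupt is found or all the queues are empty.
 *
 * Then updates the CPPR (Current Processor Priority
 * Register) based on the most favored interrupt found
 * (0xff if none) and return what was found (0 if none).
 *
 * If just_peek is set, return the most favored pending
 * interrupt if any but don't update the queue pointers.
 *
 * Note: This function can operate generically on any number
 * of queues (up to 8). The current implementation of the XIVE
 * driver only uses a single queue however.
 *
 * Note2: This will also "flush" "the pending_count" of a queue
 * into the "count" when that queue is observed to be empty.
 * This is used to keep track of the amount of interrupts
 * targeting a queue. When an interrupt is moved from one
 * queue to another, we delay the decrement of pending_count
 * until the interrupt is observed flushed from the original
 * queue.
 */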
static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
{
	u32 irq = 0;
	u8 prio = 0;

	/* Find highest pending priority */
	while (xc->pending_prio != 0) {
		struct xive_q *q;

		prio = ffs(xc->pending_prio) - 1;
		DBG_VERBOSE("scan_irq: trying prio %d\n", prio);

		/* Try to fetch from the queue */
		irq = xive_read_eq(&xc->queue[prio], just_peek);

		/* Found something ? That's it */
		if (irq) {
			if (just_peek || irq_to_desc(irq))
				break;
			/*
			 * We should never get here: the descriptor is
			 * gone, so warn and drop the interrupt.
			 */
			pr_crit("got interrupt %d without descriptor, dropping\n",
				irq);
			WARN_ON(1);
			continue;
		}

		/* Clear pending bits */
		xc->pending_prio &= ~(1 << prio);

		/*
		 * Check if the queue count needs adjusting due to
		 * interrupts being moved away. See description of
		 * xive_dec_target_count()
		 */
		q = &xc->queue[prio];
		if (atomic_read(&q->pending_count)) {
			int p = atomic_xchg(&q->pending_count, 0);

			if (p) {
				WARN_ON(p > atomic_read(&q->count));
				atomic_sub(p, &q->count);
			}
		}
	}

	/* If nothing was found, set CPPR to 0xff */
	if (irq == 0)
		prio = 0xff;

	/* Update HW CPPR to match if necessary */
	if (prio != xc->cppr) {
		DBG_VERBOSE("scan_irq: adjusting CPPR to %d\n", prio);
		xc->cppr = prio;
		out_8(xive_tima + xive_tima_offset + TM_CPPR, prio);
	}

	return irq;
}
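/*
 * This is used to perform the magic loads from an ESB
 * described in xive-regs.h
 */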
static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
{
	u64 val;

	if (offset == XIVE_ESB_SET_PQ_10 && xive_is_store_eoi(xd))
		offset |= XIVE_ESB_LD_ST_MO;

	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
		val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0);
	else
		val = in_be64(xd->eoi_mmio + offset);

	return (u8)val;
}

static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data)
{
	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
		xive_ops->esb_rw(xd->hw_irq, offset, data, 1);
	else
		out_be64(xd->eoi_mmio + offset, data);
}

#if defined(CONFIG_XMON) || defined(CONFIG_DEBUG_FS)
static void xive_irq_data_dump(struct xive_irq_data *xd, char *buffer, size_t size)
{
	u64 val = xive_esb_read(xd, XIVE_ESB_GET);

	snprintf(buffer, size, "flags=%c%c%c PQ=%c%c 0x%016llx 0x%016llx",
		 xive_is_store_eoi(xd) ? 'S' : ' ',
		 xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
		 xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
		 val & XIVE_ESB_VAL_P ? 'P' : '-',
		 val & XIVE_ESB_VAL_Q ? 'Q' : '-',
		 xd->trig_page, xd->eoi_page);
}
#endif

#ifdef CONFIG_XMON
static notrace void xive_dump_eq(const char *name, struct xive_q *q)
{
	u32 i0, i1, idx;

	if (!q->qpage)
		return;
	idx = q->idx;
	i0 = be32_to_cpup(q->qpage + idx);
	idx = (idx + 1) & q->msk;
	i1 = be32_to_cpup(q->qpage + idx);
	xmon_printf("%s idx=%d T=%d %08x %08x ...", name,
		    q->idx, q->toggle, i0, i1);
}

notrace void xmon_xive_do_dump(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);

	xmon_printf("CPU %d:", cpu);
	if (xc) {
		xmon_printf("pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr);

#ifdef CONFIG_SMP
		{
			char buffer[128];

			xive_irq_data_dump(&xc->ipi_data, buffer, sizeof(buffer));
			xmon_printf("IPI=0x%08x %s", xc->hw_ipi, buffer);
		}
#endif
		xive_dump_eq("EQ", &xc->queue[xive_irq_priority]);
	}
	xmon_printf("\n");
}

static struct irq_data *xive_get_irq_data(u32 hw_irq)
{
	unsigned int irq = irq_find_mapping(xive_irq_domain, hw_irq);

	return irq ? irq_get_irq_data(irq) : NULL;
}

int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
{
	int rc;
	u32 target;
	u8 prio;
	u32 lirq;

	rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
	if (rc) {
		xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
		return rc;
	}

	xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
		    hw_irq, target, prio, lirq);

	if (!d)
		d = xive_get_irq_data(hw_irq);

	if (d) {
		char buffer[128];

		xive_irq_data_dump(irq_data_get_irq_handler_data(d),
				   buffer, sizeof(buffer));
		xmon_printf("%s", buffer);
	}

	xmon_printf("\n");
	return 0;
}

void xmon_xive_get_irq_all(void)
{
	unsigned int i;
	struct irq_desc *desc;

	for_each_irq_desc(i, desc) {
		struct irq_data *d = irq_domain_get_irq_data(xive_irq_domain, i);

		if (d)
			xmon_xive_get_irq_config(irqd_to_hwirq(d), d);
	}
}

#endif /* CONFIG_XMON */

static unsigned int xive_get_irq(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	u32 irq;

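	/*
	 * This can be called either as a result of a HW interrupt or
	 * as a "replay" because EOI decided there was still something
	 * in one of the queues.
	 *
	 * First we perform an ACK cycle in order to update our mask
	 * of pending priorities. This will also have the effect of
	 * updating the CPPR to the most favored pending interrupts.
	 */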
	xive_ops->update_pending(xc);

	DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio);

	/* Scan our queue(s) for interrupts */
	irq = xive_scan_interrupts(xc, false);

	DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n",
		    irq, xc->pending_prio);

	/* Return pending interrupt if any */
	if (irq == XIVE_BAD_IRQ)
		return 0;
	return irq;
}
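/*
 * After EOI'ing an interrupt, we need to re-check the queue
 * to see if another interrupt is pending since multiple
 * interrupts can coalesce into a single notification to the
 * CPU.
 *
 * If we find that there is indeed more in there, we call
 * force_external_irq_replay() to make Linux synthesize an
 * external interrupt on the next call to local_irq_restore().
 */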
static void xive_do_queue_eoi(struct xive_cpu *xc)
{
	if (xive_scan_interrupts(xc, true) != 0) {
		DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio);
		force_external_irq_replay();
	}
}
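/*
 * EOI an interrupt at the source. There are several methods
 * to do this depending on the HW version and source type.
 */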
static void xive_do_source_eoi(struct xive_irq_data *xd)
{
	u8 eoi_val;

	xd->stale_p = false;

	/* If the XIVE supports the new "store EOI" facility, use it */
	if (xive_is_store_eoi(xd)) {
		xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
		return;
	}

	/*
	 * For LSIs, we use the "EOI cycle" special load rather than
	 * PQ bits, as they are automatically re-triggered in HW when
	 * still pending.
	 */
	if (xd->flags & XIVE_IRQ_FLAG_LSI) {
		xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
		return;
	}

	/*
	 * Otherwise, we use the special MMIO that does a clear of
	 * both P and Q and returns the old Q. This allows us to then
	 * do a re-trigger if Q was set rather than synthesizing an
	 * interrupt in software.
	 */
	eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
	DBG_VERBOSE("eoi_val=%x\n", eoi_val);

	/* Re-trigger if needed */
	if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
		out_be64(xd->trig_mmio, 0);
}

/* irq_chip eoi callback, called with irq descriptor lock held */
static void xive_irq_eoi(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
		    d->irq, irqd_to_hwirq(d), xc->pending_prio);

	/*
	 * EOI the source if it hasn't been disabled and hasn't
	 * been passed-through to a KVM guest
	 */
	if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d) &&
	    !(xd->flags & XIVE_IRQ_FLAG_NO_EOI))
		xive_do_source_eoi(xd);
	else
		xd->stale_p = true;

	/*
	 * Clear saved_p to indicate that it's no longer occupying
	 * a queue slot on the target queue
	 */
	xd->saved_p = false;

	/* Check for more work in the queue */
	xive_do_queue_eoi(xc);
}

/*
 * Helper used to mask and unmask an interrupt source.
 */
static void xive_do_source_set_mask(struct xive_irq_data *xd,
				    bool mask)
{
	u64 val;

	pr_debug("%s: HW 0x%x %smask\n", __func__, xd->hw_irq, mask ? "" : "un");

	/*
	 * If the interrupt had P set, it may be in a queue.
	 *
	 * We need to make sure we don't re-enable it until it
	 * has been fetched from that queue and EOId. We keep
	 * a copy of that P state and use it to restore the
	 * ESB accordingly on unmask.
	 */
	if (mask) {
		val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
		if (!xd->stale_p && !!(val & XIVE_ESB_VAL_P))
			xd->saved_p = true;
		xd->stale_p = false;
	} else if (xd->saved_p) {
		xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
		xd->saved_p = false;
	} else {
		xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
		xd->stale_p = false;
	}
}

/*
 * Try to choose "cpu" as a new interrupt target. Increments
 * the queue accounting for that target if it's not already
 * full.
 */
static bool xive_try_pick_target(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	struct xive_q *q = &xc->queue[xive_irq_priority];
	int max;

	/*
	 * Calculate max number of interrupts in that queue.
	 *
	 * We leave a gap of one interrupt in the queue.
	 */
	max = (q->msk + 1) - 1;
	return !!atomic_add_unless(&q->count, 1, max);
}
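/*
 * Un-account an interrupt for a target CPU. We don't directly
 * decrement q->count since the interrupt might still be present
 * in the queue.
 *
 * Instead we increment a "pending_count" that will be subtracted
 * from "count" the next time that queue is observed empty by the
 * scan loop in xive_scan_interrupts().
 */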
static void xive_dec_target_count(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	struct xive_q *q = &xc->queue[xive_irq_priority];

	if (WARN_ON(cpu < 0 || !xc)) {
		pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc);
		return;
	}

	/*
	 * We increment the "pending count" which will be used
	 * to decrement the target queue count whenever it's next
	 * processed and found empty. This ensures that we don't
	 * decrement while we still have the interrupt there
	 * occupying a slot.
	 */
	atomic_inc(&q->pending_count);
}

/* Find a tentative CPU target in a CPU mask */
static int xive_find_target_in_mask(const struct cpumask *mask,
				    unsigned int fuzz)
{
	int cpu, first, num, i;

	/* Pick up a starting point CPU in the mask based on fuzz */
	num = min_t(int, cpumask_weight(mask), nr_cpu_ids);
	first = fuzz % num;

	/* Locate it */
	cpu = cpumask_first(mask);
	for (i = 0; i < first && cpu < nr_cpu_ids; i++)
		cpu = cpumask_next(cpu, mask);

	/* Sanity check */
	if (WARN_ON(cpu >= nr_cpu_ids))
		cpu = cpumask_first(cpu_online_mask);

	/* Remember first one to handle wrap-around */
	first = cpu;

	/*
	 * Now go through the entire mask until we find a valid
	 * target.
	 */
	do {
		/*
		 * We re-check online as the fallback case passes us
		 * an untested affinity mask
		 */
		if (cpu_online(cpu) && xive_try_pick_target(cpu))
			return cpu;
		cpu = cpumask_next(cpu, mask);
		/* Wrap around */
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(mask);
	} while (cpu != first);

	return -1;
}

/*
 * Pick a target CPU for an interrupt. This is done at
 * startup or if the affinity is changed in a way that
 * invalidates the current target.
 */
static int xive_pick_irq_target(struct irq_data *d,
				const struct cpumask *affinity)
{
	static unsigned int fuzz;
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	cpumask_var_t mask;
	int cpu = -1;

	/*
	 * If we have chip IDs, first we try to build a mask of
	 * CPUs matching the CPU and find a target in there
	 */
	if (xd->src_chip != XIVE_INVALID_CHIP_ID &&
	    zalloc_cpumask_var(&mask, GFP_ATOMIC)) {
		/* Build a mask of matching chip IDs */
		for_each_cpu_and(cpu, affinity, cpu_online_mask) {
			struct xive_cpu *xc = per_cpu(xive_cpu, cpu);

			if (xc->chip_id == xd->src_chip)
				cpumask_set_cpu(cpu, mask);
		}
		/* Try to find a target */
		if (cpumask_empty(mask))
			cpu = -1;
		else
			cpu = xive_find_target_in_mask(mask, fuzz++);
		free_cpumask_var(mask);
		if (cpu >= 0)
			return cpu;
		fuzz--;
	}

	/* No chip IDs, fallback to using the affinity mask */
	return xive_find_target_in_mask(affinity, fuzz++);
}

static unsigned int xive_irq_startup(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int target, rc;

	xd->saved_p = false;
	xd->stale_p = false;

	pr_debug("%s: irq %d [0x%x] data @%p\n", __func__, d->irq, hw_irq, d);

	/* Pick a target */
	target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d));
	if (target == XIVE_INVALID_TARGET) {
		/* Try again breaking affinity */
		target = xive_pick_irq_target(d, cpu_online_mask);
		if (target == XIVE_INVALID_TARGET)
			return -ENXIO;
		pr_warn("irq %d started with broken affinity\n", d->irq);
	}

	/* Sanity check */
	if (WARN_ON(target == XIVE_INVALID_TARGET ||
		    target >= nr_cpu_ids))
		target = smp_processor_id();

	xd->target = target;

	/*
	 * Configure the logical number to be the Linux IRQ number
	 * and set the target queue
	 */
	rc = xive_ops->configure_irq(hw_irq,
				     get_hard_smp_processor_id(target),
				     xive_irq_priority, d->irq);
	if (rc)
		return rc;

	/* Unmask the ESB */
	xive_do_source_set_mask(xd, false);

	return 0;
}

/* irq_chip shutdown callback, called with irq descriptor lock held */
static void xive_irq_shutdown(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

	pr_debug("%s: irq %d [0x%x] data @%p\n", __func__, d->irq, hw_irq, d);

	if (WARN_ON(xd->target == XIVE_INVALID_TARGET))
		return;

	/* Mask the interrupt at the source */
	xive_do_source_set_mask(xd, true);

	/*
	 * Mask the interrupt in HW in the IVT/EAS and set the number
	 * to be the "bad" IRQ number
	 */
	xive_ops->configure_irq(hw_irq,
				get_hard_smp_processor_id(xd->target),
				0xff, XIVE_BAD_IRQ);

	xive_dec_target_count(xd->target);
	xd->target = XIVE_INVALID_TARGET;
}

static void xive_irq_unmask(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	pr_debug("%s: irq %d data @%p\n", __func__, d->irq, xd);

	xive_do_source_set_mask(xd, false);
}

static void xive_irq_mask(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	pr_debug("%s: irq %d data @%p\n", __func__, d->irq, xd);

	xive_do_source_set_mask(xd, true);
}

static int xive_irq_set_affinity(struct irq_data *d,
				 const struct cpumask *cpumask,
				 bool force)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	u32 target, old_target;
	int rc = 0;

	pr_debug("%s: irq %d/0x%x\n", __func__, d->irq, hw_irq);

	/* Is this valid ? */
	if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
		return -EINVAL;

	/*
	 * If existing target is already in the new mask, and is
	 * online then do nothing.
	 */
	if (xd->target != XIVE_INVALID_TARGET &&
	    cpu_online(xd->target) &&
	    cpumask_test_cpu(xd->target, cpumask))
		return IRQ_SET_MASK_OK;

	/* Pick a new target */
	target = xive_pick_irq_target(d, cpumask);

	/* No target found */
	if (target == XIVE_INVALID_TARGET)
		return -ENXIO;

	/* Sanity check */
	if (WARN_ON(target >= nr_cpu_ids))
		target = smp_processor_id();

	old_target = xd->target;

	/*
	 * Only configure the irq if it's not currently passed-through to
	 * a KVM guest
	 */
	if (!irqd_is_forwarded_to_vcpu(d))
		rc = xive_ops->configure_irq(hw_irq,
					     get_hard_smp_processor_id(target),
					     xive_irq_priority, d->irq);
	if (rc < 0) {
		pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
		return rc;
	}

	pr_debug("  target: 0x%x\n", target);
	xd->target = target;

	/* Give up previous target */
	if (old_target != XIVE_INVALID_TARGET)
		xive_dec_target_count(old_target);

	return IRQ_SET_MASK_OK;
}

static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/*
	 * We only support these. This has really no effect other than setting
	 * the corresponding descriptor bits mind you but those will in turn
	 * affect the resend function when re-enabling an edge interrupt.
	 *
	 * Set the default to edge as explained in map().
	 */
	if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_EDGE_RISING;

	if (flow_type != IRQ_TYPE_EDGE_RISING &&
	    flow_type != IRQ_TYPE_LEVEL_LOW)
		return -EINVAL;

	irqd_set_trigger_type(d, flow_type);

	/*
	 * Double check it matches what the FW thinks
	 *
	 * NOTE: We don't know yet if the PAPR interface will provide
	 * the LSI vs MSI information apart from the device-tree so
	 * this check might have to move into an optional backend call
	 * that is specific to the native backend
	 */
	if ((flow_type == IRQ_TYPE_LEVEL_LOW) !=
	    !!(xd->flags & XIVE_IRQ_FLAG_LSI)) {
		pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n",
			d->irq, (u32)irqd_to_hwirq(d),
			(flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge",
			(xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge");
	}

	return IRQ_SET_MASK_OK_NOCOPY;
}

static int xive_irq_retrigger(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/* This should be only for MSIs */
	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
		return 0;

	/*
	 * To perform a retrigger, we first set the PQ bits to
	 * 11, then perform an EOI.
	 */
	xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
	xive_do_source_eoi(xd);

	return 1;
}

/*
 * The irq descriptor lock is held by the caller, so this function
 * won't race with xive_get_irqchip_state() on the same interrupt.
 */
static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int rc;
	u8 pq;

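	/*
	 * This is called by KVM with state non-NULL for enabling
	 * interrupt pass-through or NULL for disabling it
	 */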
	if (state) {
		irqd_set_forwarded_to_vcpu(d);

		/* Set it to PQ=10 state to prevent further sends */
		pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
		if (!xd->stale_p) {
			xd->saved_p = !!(pq & XIVE_ESB_VAL_P);
			xd->stale_p = !xd->saved_p;
		}

		/* No target ? nothing to do */
		if (xd->target == XIVE_INVALID_TARGET) {
			/*
			 * An untargetted interrupt should have been
			 * also masked at the source
			 */
			WARN_ON(xd->saved_p);

			return 0;
		}

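		/*
		 * If P was set, adjust the state to PQ=11 to indicate
		 * that a resend is needed for the interrupt to reach
		 * the guest. Also remember the value of P.
		 *
		 * This also tells us that it's in flight to a host queue
		 * or has already been fetched but hasn't been EOIed yet
		 * by the host. It's thus potentially using up a host
		 * queue slot. This is important to know because as long
		 * as this is the case, we must not hard-unmask it when
		 * "returning" that interrupt to the host.
		 *
		 * This saved_p is cleared by the host EOI, when we know
		 * the interrupt is no longer in a queue.
		 */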
		if (xd->saved_p) {
			xive_esb_read(xd, XIVE_ESB_SET_PQ_11);

			/*
			 * Sync the XIVE source HW to ensure the interrupt
			 * has gone through the EAS before we change its
			 * state to EOI'd in the queue.
			 */
			if (xive_ops->sync_source)
				xive_ops->sync_source(hw_irq);
		}
	} else {
		irqd_clr_forwarded_to_vcpu(d);

		/* No host target ? just mask and return */
		if (xd->target == XIVE_INVALID_TARGET) {
			xive_do_source_set_mask(xd, true);
			return 0;
		}

		/*
		 * Sync the XIVE source HW to ensure the interrupt
		 * has gone through the EAS before we change its
		 * state back to the host.
		 */
		if (xive_ops->sync_source)
			xive_ops->sync_source(hw_irq);

		/*
		 * By convention we are called with the interrupt in
		 * a PQ=10 or PQ=11 state, ie, it won't fire and will
		 * have latched in Q whether there's a pending HW
		 * interrupt or not.
		 *
		 * First reconfigure the target.
		 */
		rc = xive_ops->configure_irq(hw_irq,
					     get_hard_smp_processor_id(xd->target),
					     xive_irq_priority, d->irq);
		if (rc)
			return rc;

		/*
		 * Then if saved_p is not set, effectively re-enable
		 * the interrupt with an EOI. If it is set, we know
		 * there is still a message in a host queue somewhere
		 * that will be EOId eventually.
		 */
		if (!xd->saved_p)
			xive_do_source_eoi(xd);
	}
	return 0;
}

/* Called with irq descriptor lock held. */
static int xive_get_irqchip_state(struct irq_data *data,
				  enum irqchip_irq_state which, bool *state)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(data);
	u8 pq;

	switch (which) {
	case IRQCHIP_STATE_ACTIVE:
		pq = xive_esb_read(xd, XIVE_ESB_GET);

		/*
		 * The esb value being all 1's means we couldn't get
		 * the PQ state of the interrupt through mmio. It may
		 * happen, for example when querying a PHB interrupt
		 * while the PHB is in an error state. We consider the
		 * interrupt to be inactive in that case.
		 */
		*state = (pq != XIVE_ESB_INVALID) && !xd->stale_p &&
			(xd->saved_p || (!!(pq & XIVE_ESB_VAL_P) &&
			 !irqd_irq_disabled(data)));
		return 0;
	default:
		return -EINVAL;
	}
}

static struct irq_chip xive_irq_chip = {
	.name = "XIVE-IRQ",
	.irq_startup = xive_irq_startup,
	.irq_shutdown = xive_irq_shutdown,
	.irq_eoi = xive_irq_eoi,
	.irq_mask = xive_irq_mask,
	.irq_unmask = xive_irq_unmask,
	.irq_set_affinity = xive_irq_set_affinity,
	.irq_set_type = xive_irq_set_type,
	.irq_retrigger = xive_irq_retrigger,
	.irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
	.irq_get_irqchip_state = xive_get_irqchip_state,
};

bool is_xive_irq(struct irq_chip *chip)
{
	return chip == &xive_irq_chip;
}
EXPORT_SYMBOL_GPL(is_xive_irq);

void xive_cleanup_irq_data(struct xive_irq_data *xd)
{
	pr_debug("%s for HW 0x%x\n", __func__, xd->hw_irq);

	if (xd->eoi_mmio) {
		iounmap(xd->eoi_mmio);
		if (xd->eoi_mmio == xd->trig_mmio)
			xd->trig_mmio = NULL;
		xd->eoi_mmio = NULL;
	}
	if (xd->trig_mmio) {
		iounmap(xd->trig_mmio);
		xd->trig_mmio = NULL;
	}
}
EXPORT_SYMBOL_GPL(xive_cleanup_irq_data);

static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
{
	struct xive_irq_data *xd;
	int rc;

	xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL);
	if (!xd)
		return -ENOMEM;
	rc = xive_ops->populate_irq_data(hw, xd);
	if (rc) {
		kfree(xd);
		return rc;
	}
	xd->target = XIVE_INVALID_TARGET;
	irq_set_handler_data(virq, xd);

	/*
	 * Turn OFF by default the interrupt being mapped. A side
	 * effect of this check is the mapping the ESB page of the
	 * interrupt in the Linux address space. This prevents page
	 * fault issues in the crash handler which masks all
	 * interrupts.
	 */
	xive_esb_read(xd, XIVE_ESB_SET_PQ_01);

	return 0;
}

void xive_irq_free_data(unsigned int virq)
{
	struct xive_irq_data *xd = irq_get_handler_data(virq);

	if (!xd)
		return;
	irq_set_handler_data(virq, NULL);
	xive_cleanup_irq_data(xd);
	kfree(xd);
}
EXPORT_SYMBOL_GPL(xive_irq_free_data);

#ifdef CONFIG_SMP

static void xive_cause_ipi(int cpu)
{
	struct xive_cpu *xc;
	struct xive_irq_data *xd;

	xc = per_cpu(xive_cpu, cpu);

	DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n",
		    smp_processor_id(), cpu, xc->hw_ipi);

	xd = &xc->ipi_data;
	if (WARN_ON(!xd->trig_mmio))
		return;
	out_be64(xd->trig_mmio, 0);
}

static irqreturn_t xive_muxed_ipi_action(int irq, void *dev_id)
{
	return smp_ipi_demux();
}

static void xive_ipi_eoi(struct irq_data *d)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	/* Handle possible race with unplug and drop stale IPIs */
	if (!xc)
		return;

	DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
		    d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);

	xive_do_source_eoi(&xc->ipi_data);
	xive_do_queue_eoi(xc);
}

static void xive_ipi_do_nothing(struct irq_data *d)
{
	/*
	 * Nothing to do, we never mask/unmask IPIs, but the callback
	 * has to exist for the struct irq_chip.
	 */
}

static struct irq_chip xive_ipi_chip = {
	.name = "XIVE-IPI",
	.irq_eoi = xive_ipi_eoi,
	.irq_mask = xive_ipi_do_nothing,
	.irq_unmask = xive_ipi_do_nothing,
};

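/*
 * IPIs are marked per-cpu. We use separate HW interrupts under the
 * hood but associated with the same "linux" interrupt
 */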
struct xive_ipi_alloc_info {
	irq_hw_number_t hwirq;
};

static int xive_ipi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				     unsigned int nr_irqs, void *arg)
{
	struct xive_ipi_alloc_info *info = arg;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, info->hwirq + i, &xive_ipi_chip,
				    domain->host_data, handle_percpu_irq,
				    NULL, NULL);
	}
	return 0;
}

static const struct irq_domain_ops xive_ipi_irq_domain_ops = {
	.alloc = xive_ipi_irq_domain_alloc,
};

static int __init xive_init_ipis(void)
{
	struct fwnode_handle *fwnode;
	struct irq_domain *ipi_domain;
	unsigned int node;
	int ret = -ENOMEM;

	fwnode = irq_domain_alloc_named_fwnode("XIVE-IPI");
	if (!fwnode)
		goto out;

	ipi_domain = irq_domain_create_linear(fwnode, nr_node_ids,
					      &xive_ipi_irq_domain_ops, NULL);
	if (!ipi_domain)
		goto out_free_fwnode;

	xive_ipis = kcalloc(nr_node_ids, sizeof(*xive_ipis), GFP_KERNEL | __GFP_NOFAIL);
	if (!xive_ipis)
		goto out_free_domain;

	for_each_node(node) {
		struct xive_ipi_desc *xid = &xive_ipis[node];
		struct xive_ipi_alloc_info info = { node };

		/*
		 * Map one IPI interrupt per node for all cpus of that node.
		 * Since the HW interrupt number doesn't have any meaning,
		 * simply use the node number.
		 */
		ret = irq_domain_alloc_irqs(ipi_domain, 1, node, &info);
		if (ret < 0)
			goto out_free_xive_ipis;
		xid->irq = ret;

		snprintf(xid->name, sizeof(xid->name), "IPI-%d", node);
	}

	return ret;

out_free_xive_ipis:
	kfree(xive_ipis);
out_free_domain:
	irq_domain_remove(ipi_domain);
out_free_fwnode:
	irq_domain_free_fwnode(fwnode);
out:
	return ret;
}

static int xive_request_ipi(unsigned int cpu)
{
	struct xive_ipi_desc *xid = &xive_ipis[early_cpu_to_node(cpu)];
	int ret;

	if (atomic_inc_return(&xid->started) > 1)
		return 0;

	ret = request_irq(xid->irq, xive_muxed_ipi_action,
			  IRQF_NO_DEBUG | IRQF_PERCPU | IRQF_NO_THREAD,
			  xid->name, NULL);

	WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
	return ret;
}

static int xive_setup_cpu_ipi(unsigned int cpu)
{
	unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);
	struct xive_cpu *xc;
	int rc;

	pr_debug("Setting up IPI for CPU %d\n", cpu);

	xc = per_cpu(xive_cpu, cpu);

	/* Check if we are already setup */
	if (xc->hw_ipi != XIVE_BAD_IRQ)
		return 0;

	/* Register the IPI */
	xive_request_ipi(cpu);

	/* Grab an IPI from the backend, this will populate xc->hw_ipi */
	if (xive_ops->get_ipi(cpu, xc))
		return -EIO;

	/*
	 * Populate the IRQ data in the xive_cpu structure and
	 * configure the HW / enable the IPIs.
	 */
	rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data);
	if (rc) {
		pr_err("Failed to populate IPI data on CPU %d\n", cpu);
		return -EIO;
	}
	rc = xive_ops->configure_irq(xc->hw_ipi,
				     get_hard_smp_processor_id(cpu),
				     xive_irq_priority, xive_ipi_irq);
	if (rc) {
		pr_err("Failed to map IPI CPU %d\n", cpu);
		return -EIO;
	}
	pr_debug("CPU %d HW IPI 0x%x, virq %d, trig_mmio=%p\n", cpu,
		 xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);

	/* Unmask it */
	xive_do_source_set_mask(&xc->ipi_data, false);

	return 0;
}

static noinstr void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);

	/* Disable the IPI and free the IRQ data */

	/* Already cleaned up ? */
	if (xc->hw_ipi == XIVE_BAD_IRQ)
		return;

	/* Mask the IPI */
	xive_do_source_set_mask(&xc->ipi_data, true);

	/*
	 * Note: We don't call xive_cleanup_irq_data() to free
	 * the mappings as this is called from an IPI on kexec
	 * which is not a safe environment to call iounmap()
	 */

	/* Deconfigure/mask in the backend */
	xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(),
				0xff, xive_ipi_irq);

	/* Free the IPIs in the backend */
	xive_ops->put_ipi(cpu, xc);
}

void __init xive_smp_probe(void)
{
	smp_ops->cause_ipi = xive_cause_ipi;

	/* Register the IPI */
	xive_init_ipis();

	/* Allocate and setup IPI for the boot CPU */
	xive_setup_cpu_ipi(smp_processor_id());
}

#endif /* CONFIG_SMP */

static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
			       irq_hw_number_t hw)
{
	int rc;

	/*
	 * Mark interrupts as edge sensitive by default so that resend
	 * actually works. Will fix that up below if needed.
	 */
	irq_clear_status_flags(virq, IRQ_LEVEL);

	rc = xive_irq_alloc_data(virq, hw);
	if (rc)
		return rc;

	irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq);

	return 0;
}

static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
{
	xive_irq_free_data(virq);
}

static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct,
				 const u32 *intspec, unsigned int intsize,
				 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	*out_hwirq = intspec[0];

	/*
	 * If intsize is at least 2, we look for the type in the second cell,
	 * we assume the LSB indicates a level interrupt.
	 */
	if (intsize > 1) {
		if (intspec[1] & 1)
			*out_flags = IRQ_TYPE_LEVEL_LOW;
		else
			*out_flags = IRQ_TYPE_EDGE_RISING;
	} else
		*out_flags = IRQ_TYPE_LEVEL_LOW;

	return 0;
}

static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node,
				 enum irq_domain_bus_token bus_token)
{
	return xive_ops->match(node);
}

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static const char * const esb_names[] = { "RESET", "OFF", "PENDING", "QUEUED" };

static const struct {
	u64  mask;
	char *name;
} xive_irq_flags[] = {
	{ XIVE_IRQ_FLAG_STORE_EOI, "STORE_EOI" },
	{ XIVE_IRQ_FLAG_LSI,       "LSI"       },
	{ XIVE_IRQ_FLAG_H_INT_ESB, "H_INT_ESB" },
	{ XIVE_IRQ_FLAG_NO_EOI,    "NO_EOI"    },
};

static void xive_irq_domain_debug_show(struct seq_file *m, struct irq_domain *d,
				       struct irq_data *irqd, int ind)
{
	struct xive_irq_data *xd;
	u64 val;
	int i;

	/* No IRQ domain level information. To be done */
	if (!irqd)
		return;

	if (!is_xive_irq(irq_data_get_irq_chip(irqd)))
		return;

	seq_printf(m, "%*sXIVE:\n", ind, "");
	ind++;

	xd = irq_data_get_irq_handler_data(irqd);
	if (!xd) {
		seq_printf(m, "%*snot assigned\n", ind, "");
		return;
	}

	val = xive_esb_read(xd, XIVE_ESB_GET);
	seq_printf(m, "%*sESB: %s\n", ind, "", esb_names[val & 0x3]);
	seq_printf(m, "%*sPstate: %s %s\n", ind, "", xd->stale_p ? "stale" : "",
		   xd->saved_p ? "saved" : "");
	seq_printf(m, "%*sTarget: %d\n", ind, "", xd->target);
	seq_printf(m, "%*sChip: %d\n", ind, "", xd->src_chip);
	seq_printf(m, "%*sTrigger: 0x%016llx\n", ind, "", xd->trig_page);
	seq_printf(m, "%*sEOI: 0x%016llx\n", ind, "", xd->eoi_page);
	seq_printf(m, "%*sFlags: 0x%llx\n", ind, "", xd->flags);
	for (i = 0; i < ARRAY_SIZE(xive_irq_flags); i++) {
		if (xd->flags & xive_irq_flags[i].mask)
			seq_printf(m, "%*s%s\n", ind + 12, "", xive_irq_flags[i].name);
	}
}
#endif

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
static int xive_irq_domain_translate(struct irq_domain *d,
				     struct irq_fwspec *fwspec,
				     unsigned long *hwirq,
				     unsigned int *type)
{
	return xive_irq_domain_xlate(d, to_of_node(fwspec->fwnode),
				     fwspec->param, fwspec->param_count,
				     hwirq, type);
}

static int xive_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	int i, rc;

	rc = xive_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (rc)
		return rc;

	pr_debug("%s %d/0x%lx #%d\n", __func__, virq, hwirq, nr_irqs);

	for (i = 0; i < nr_irqs; i++) {
		/*
		 * Mark interrupts as edge sensitive by default so that resend
		 * actually works. The interrupt-specific "set_type" callback
		 * will fix that up if needed.
		 */
		irq_clear_status_flags(virq, IRQ_LEVEL);

		/* allocates and sets handler data */
		rc = xive_irq_alloc_data(virq + i, hwirq + i);
		if (rc)
			return rc;

		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &xive_irq_chip, domain->host_data);
		irq_set_handler(virq + i, handle_fasteoi_irq);
	}

	return 0;
}

static void xive_irq_domain_free(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	int i;

	pr_debug("%s %d #%d\n", __func__, virq, nr_irqs);

	for (i = 0; i < nr_irqs; i++)
		xive_irq_free_data(virq + i);
}
#endif

static const struct irq_domain_ops xive_irq_domain_ops = {
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	.alloc = xive_irq_domain_alloc,
	.free = xive_irq_domain_free,
	.translate = xive_irq_domain_translate,
#endif
	.match = xive_irq_domain_match,
	.map = xive_irq_domain_map,
	.unmap = xive_irq_domain_unmap,
	.xlate = xive_irq_domain_xlate,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
	.debug_show = xive_irq_domain_debug_show,
#endif
};

static void __init xive_init_host(struct device_node *np)
{
	xive_irq_domain = irq_domain_add_tree(np, &xive_irq_domain_ops, NULL);
	if (WARN_ON(xive_irq_domain == NULL))
		return;
	irq_set_default_host(xive_irq_domain);
}

static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
	if (xc->queue[xive_irq_priority].qpage)
		xive_ops->cleanup_queue(cpu, xc, xive_irq_priority);
}

static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
	int rc = 0;

	/* We use a single queue for now */
	if (!xc->queue[xive_irq_priority].qpage)
		rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority);

	return rc;
}

static int xive_prepare_cpu(unsigned int cpu)
{
	struct xive_cpu *xc;

	xc = per_cpu(xive_cpu, cpu);
	if (!xc) {
		xc = kzalloc_node(sizeof(struct xive_cpu),
				  GFP_KERNEL, cpu_to_node(cpu));
		if (!xc)
			return -ENOMEM;
		xc->hw_ipi = XIVE_BAD_IRQ;
		xc->chip_id = XIVE_INVALID_CHIP_ID;
		if (xive_ops->prepare_cpu)
			xive_ops->prepare_cpu(cpu, xc);

		per_cpu(xive_cpu, cpu) = xc;
	}

	/* Setup EQs if not already done */
	return xive_setup_cpu_queues(cpu, xc);
}

static void xive_setup_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	/* The backend might have additional things to do */
	if (xive_ops->setup_cpu)
		xive_ops->setup_cpu(smp_processor_id(), xc);

	/* Set CPPR to 0xff to enable flow of interrupts */
	xc->cppr = 0xff;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}

#ifdef CONFIG_SMP
void xive_smp_setup_cpu(void)
{
	pr_debug("SMP setup CPU %d\n", smp_processor_id());

	/* This will have already been done on the boot CPU */
	if (smp_processor_id() != boot_cpuid)
		xive_setup_cpu();
}

int xive_smp_prepare_cpu(unsigned int cpu)
{
	int rc;

	/* Allocate per-CPU data and queues */
	rc = xive_prepare_cpu(cpu);
	if (rc)
		return rc;

	/* Allocate and setup IPI for the new CPU */
	return xive_setup_cpu_ipi(cpu);
}

#ifdef CONFIG_HOTPLUG_CPU
static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
{
	u32 irq;

	/* We assume local irqs are disabled */
	WARN_ON(!irqs_disabled());

	/* Check what's already in the CPU queue */
	while ((irq = xive_scan_interrupts(xc, false)) != 0) {
		/*
		 * We need to re-route that interrupt to its new destination.
		 * First get and lock the descriptor
		 */
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *d = irq_desc_get_irq_data(desc);
		struct xive_irq_data *xd;

		/*
		 * Ignore anything that isn't a XIVE irq and ignore
		 * IPIs, so we can just be dropped.
		 */
		if (d->domain != xive_irq_domain)
			continue;

		/*
		 * The IRQ should have already been re-routed, it's just a
		 * stale in the old queue, so re-trigger it in order to make
		 * it reach its new destination.
		 */
#ifdef DEBUG_FLUSH
		pr_info("CPU %d: Got irq %d while offline, re-sending...\n",
			cpu, irq);
#endif
		raw_spin_lock(&desc->lock);
		xd = irq_desc_get_handler_data(desc);

		/* Clear saved_p to indicate that it's no longer pending */
		xd->saved_p = false;

		/*
		 * For LSIs, we EOI, this will cause a resend if it's
		 * still asserted. Otherwise do an MSI retrigger.
		 */
		if (xd->flags & XIVE_IRQ_FLAG_LSI)
			xive_do_source_eoi(xd);
		else
			xive_irq_retrigger(d);

		raw_spin_unlock(&desc->lock);
	}
}

void xive_smp_disable_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Migrate interrupts away from the CPU */
	irq_migrate_all_off_this_cpu();

	/* Set CPPR to 0 to disable flow of interrupts */
	xc->cppr = 0;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

	/* Flush everything still in the queue */
	xive_flush_cpu_queue(cpu, xc);

	/* Re-enable CPPR */
	xc->cppr = 0xff;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}

void xive_flush_interrupt(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Called if an interrupt occurs while the CPU is hot unplugged */
	xive_flush_cpu_queue(cpu, xc);
}

#endif /* CONFIG_HOTPLUG_CPU */

#endif /* CONFIG_SMP */

noinstr void xive_teardown_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Set CPPR to 0 to disable flow of interrupts */
	xc->cppr = 0;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

	if (xive_ops->teardown_cpu)
		xive_ops->teardown_cpu(cpu, xc);

#ifdef CONFIG_SMP
	/* Get rid of IPI */
	xive_cleanup_cpu_ipi(cpu, xc);
#endif

	/* Disable and free the queues */
	xive_cleanup_cpu_queues(cpu, xc);
}

void xive_shutdown(void)
{
	xive_ops->shutdown();
}

bool __init xive_core_init(struct device_node *np, const struct xive_ops *ops,
			   void __iomem *area, u32 offset, u8 max_prio)
{
	xive_tima = area;
	xive_tima_offset = offset;
	xive_ops = ops;
	xive_irq_priority = max_prio;

	ppc_md.get_irq = xive_get_irq;
	__xive_enabled = true;

	pr_debug("Initializing host..\n");
	xive_init_host(np);

	pr_debug("Initializing boot CPU..\n");

	/* Allocate per-CPU data and queues */
	xive_prepare_cpu(smp_processor_id());

	/* Get ready for interrupts */
	xive_setup_cpu();

	pr_info("Interrupt handling initialized with %s backend\n",
		xive_ops->name);
	pr_info("Using priority %d for all interrupts\n", max_prio);

	return true;
}

__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
{
	unsigned int alloc_order;
	struct page *pages;
	__be32 *qpage;

	alloc_order = xive_alloc_order(queue_shift);
	pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
	if (!pages)
		return ERR_PTR(-ENOMEM);
	qpage = (__be32 *)page_address(pages);
	memset(qpage, 0, 1 << queue_shift);

	return qpage;
}

static int __init xive_off(char *arg)
{
	xive_cmdline_disabled = true;
	return 1;
}
__setup("xive=off", xive_off);

static int __init xive_store_eoi_cmdline(char *arg)
{
	if (!arg)
		return 1;

	if (strncmp(arg, "off", 3) == 0) {
		pr_info("StoreEOI disabled on kernel command line\n");
		xive_store_eoi = false;
	}
	return 1;
}
__setup("xive.store-eoi=", xive_store_eoi_cmdline);
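
/*
 * For example, booting with "xive.store-eoi=off" on the kernel
 * command line keeps the driver on the load-based EOI path even
 * when the platform advertises the StoreEOI facility. The same
 * global toggle is also exposed as the "store-eoi" debugfs file
 * created below, and xive_is_store_eoi() re-checks it on every EOI.
 */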

#ifdef CONFIG_DEBUG_FS
static void xive_debug_show_ipi(struct seq_file *m, int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);

	seq_printf(m, "CPU %d: ", cpu);
	if (xc) {
		seq_printf(m, "pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr);

#ifdef CONFIG_SMP
		{
			char buffer[128];

			xive_irq_data_dump(&xc->ipi_data, buffer, sizeof(buffer));
			seq_printf(m, "IPI=0x%08x %s", xc->hw_ipi, buffer);
		}
#endif
	}
	seq_puts(m, "\n");
}

static void xive_debug_show_irq(struct seq_file *m, struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int rc;
	u32 target;
	u8 prio;
	u32 lirq;
	char buffer[128];

	rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
	if (rc) {
		seq_printf(m, "IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
		return;
	}

	seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
		   hw_irq, target, prio, lirq);

	xive_irq_data_dump(irq_data_get_irq_handler_data(d), buffer, sizeof(buffer));
	seq_puts(m, buffer);
	seq_puts(m, "\n");
}

static int xive_irq_debug_show(struct seq_file *m, void *private)
{
	unsigned int i;
	struct irq_desc *desc;

	for_each_irq_desc(i, desc) {
		struct irq_data *d = irq_domain_get_irq_data(xive_irq_domain, i);

		if (d)
			xive_debug_show_irq(m, d);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(xive_irq_debug);

static int xive_ipi_debug_show(struct seq_file *m, void *private)
{
	int cpu;

	if (xive_ops->debug_show)
		xive_ops->debug_show(m, private);

	for_each_online_cpu(cpu)
		xive_debug_show_ipi(m, cpu);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(xive_ipi_debug);

static void xive_eq_debug_show_one(struct seq_file *m, struct xive_q *q, u8 prio)
{
	int i;

	seq_printf(m, "EQ%d idx=%d T=%d\n", prio, q->idx, q->toggle);
	if (q->qpage) {
		for (i = 0; i < q->msk + 1; i++) {
			if (!(i % 8))
				seq_printf(m, "%05d ", i);
			seq_printf(m, "%08x%s", be32_to_cpup(q->qpage + i),
				   (i + 1) % 8 ? " " : "\n");
		}
	}
	seq_puts(m, "\n");
}

static int xive_eq_debug_show(struct seq_file *m, void *private)
{
	int cpu = (long)m->private;
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);

	if (xc)
		xive_eq_debug_show_one(m, &xc->queue[xive_irq_priority],
				       xive_irq_priority);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(xive_eq_debug);

static void xive_core_debugfs_create(void)
{
	struct dentry *xive_dir;
	struct dentry *xive_eq_dir;
	long cpu;
	char name[16];

	xive_dir = debugfs_create_dir("xive", arch_debugfs_dir);
	if (IS_ERR(xive_dir))
		return;

	debugfs_create_file("ipis", 0400, xive_dir,
			    NULL, &xive_ipi_debug_fops);
	debugfs_create_file("interrupts", 0400, xive_dir,
			    NULL, &xive_irq_debug_fops);
	xive_eq_dir = debugfs_create_dir("eqs", xive_dir);
	for_each_possible_cpu(cpu) {
		snprintf(name, sizeof(name), "cpu%ld", cpu);
		debugfs_create_file(name, 0400, xive_eq_dir, (void *)cpu,
				    &xive_eq_debug_fops);
	}
	debugfs_create_bool("store-eoi", 0600, xive_dir, &xive_store_eoi);

	if (xive_ops->debug_create)
		xive_ops->debug_create(xive_dir);
}
#else
static inline void xive_core_debugfs_create(void) { }
#endif /* CONFIG_DEBUG_FS */

int xive_core_debug_init(void)
{
	if (xive_enabled() && IS_ENABLED(CONFIG_DEBUG_FS))
		xive_core_debugfs_create();

	return 0;
}