// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/msi.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/xmon.h>

#include "xive-internal.h"

#undef DEBUG_FLUSH
#undef DEBUG_ALL

#ifdef DEBUG_ALL
#define DBG_VERBOSE(fmt, ...)	pr_devel("cpu %d - " fmt, \
					 smp_processor_id(), ## __VA_ARGS__)
#else
#define DBG_VERBOSE(fmt...)	do { } while (0)
#endif

bool __xive_enabled;
EXPORT_SYMBOL_GPL(__xive_enabled);
bool xive_cmdline_disabled;

/* We use only one priority for now */
static u8 xive_irq_priority;

/* TIMA exported to KVM */
void __iomem *xive_tima;
EXPORT_SYMBOL_GPL(xive_tima);
u32 xive_tima_offset;

/* Backend ops */
static const struct xive_ops *xive_ops;

/* Our global interrupt domain */
static struct irq_domain *xive_irq_domain;

#ifdef CONFIG_SMP
/* The IPI HW irq number */
static u32 xive_ipi_irq;
#endif

/* Xive state for each CPU */
static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);

/*
 * A "disabled" HW interrupt number, and the maximum number the
 * core will hand out.
 */
#define XIVE_BAD_IRQ		0x7fffffff
#define XIVE_MAX_IRQ		(XIVE_BAD_IRQ - 1)

/* An invalid CPU target */
#define XIVE_INVALID_TARGET	(-1)

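/*
 * Read the next entry, if any, from an event queue. Entries are
 * big-endian 32-bit words whose top bit carries a generation
 * ("toggle") flag; a top bit that still matches q->toggle marks a
 * stale slot, i.e. an empty queue. Unless just peeking, consuming
 * an entry advances the index, and the toggle is flipped whenever
 * the index wraps back to 0.
 *
 * Returns the HW interrupt number, or 0 if the queue is empty.
 */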
static u32 xive_read_eq(struct xive_q *q, bool just_peek)
{
	u32 cur;

	if (!q->qpage)
		return 0;
	cur = be32_to_cpup(q->qpage + q->idx);

	/* Check valid bit (31) vs current toggle polarity */
	if ((cur >> 31) == q->toggle)
		return 0;

	/* If consuming from the queue ... */
	if (!just_peek) {
		/* Next entry */
		q->idx = (q->idx + 1) & q->msk;

		/* Wrap around: flip valid toggle */
		if (q->idx == 0)
			q->toggle ^= 1;
	}
	/* Mask out the valid bit (31) */
	return cur & 0x7fffffff;
}
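/*
 * Scan the queues that may have interrupts pending (per the
 * xc->pending_prio bitmap) in priority order until an interrupt is
 * found or all queues are empty, then update the HW CPPR (Current
 * Processor Priority Register) to the priority of the interrupt
 * found, or 0xff if none.
 *
 * If just_peek is set, return the most favored pending interrupt
 * without moving the queue pointers or consuming the entry.
 *
 * When a queue is observed empty, any "pending_count" accumulated
 * by xive_dec_target_count() is folded into the queue's "count",
 * which tracks how many interrupt sources target that queue.
 */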
static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
{
	u32 irq = 0;
	u8 prio = 0;

	/* Find highest pending priority */
	while (xc->pending_prio != 0) {
		struct xive_q *q;

		prio = ffs(xc->pending_prio) - 1;
		DBG_VERBOSE("scan_irq: trying prio %d\n", prio);

		/* Try to fetch from the queue */
		irq = xive_read_eq(&xc->queue[prio], just_peek);

		/* Found something ? That's it */
		if (irq) {
			if (just_peek || irq_to_desc(irq))
				break;
			/*
			 * We should never get here; if we do then we must
			 * have failed to synchronize the interrupt properly
			 * when shutting it down.
			 */
			pr_crit("got interrupt %d without descriptor, dropping\n",
				irq);
			WARN_ON(1);
			continue;
		}

		/* Clear the pending bit for an empty queue */
		xc->pending_prio &= ~(1 << prio);

		/*
		 * Check if the queue count needs adjusting due to
		 * interrupts being moved away. See the description of
		 * xive_dec_target_count().
		 */
		q = &xc->queue[prio];
		if (atomic_read(&q->pending_count)) {
			int p = atomic_xchg(&q->pending_count, 0);
			if (p) {
				WARN_ON(p > atomic_read(&q->count));
				atomic_sub(p, &q->count);
			}
		}
	}

	/* If nothing was found, set CPPR to 0xff (accept everything) */
	if (irq == 0)
		prio = 0xff;

	/* Update the HW CPPR if needed */
	if (prio != xc->cppr) {
		DBG_VERBOSE("scan_irq: adjusting CPPR to %d\n", prio);
		xc->cppr = prio;
		out_8(xive_tima + xive_tima_offset + TM_CPPR, prio);
	}

	return irq;
}
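/*
 * ESB (Event State Buffer) accessors. Each source has a pair of
 * "P" (pending) and "Q" (queued) state bits that are read and
 * updated through MMIO loads/stores at magic offsets, or via a
 * firmware call for sources flagged XIVE_IRQ_FLAG_H_INT_ESB.
 */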
static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
{
	u64 val;

	/* Handle HW errata */
	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
		offset |= offset << 4;

	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
		val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0);
	else
		val = in_be64(xd->eoi_mmio + offset);

	return (u8)val;
}

static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data)
{
	/* Handle HW errata */
	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
		offset |= offset << 4;

	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
		xive_ops->esb_rw(xd->hw_irq, offset, data, 1);
	else
		out_be64(xd->eoi_mmio + offset, data);
}
#ifdef CONFIG_XMON
static notrace void xive_dump_eq(const char *name, struct xive_q *q)
{
	u32 i0, i1, idx;

	if (!q->qpage)
		return;
	idx = q->idx;
	i0 = be32_to_cpup(q->qpage + idx);
	idx = (idx + 1) & q->msk;
	i1 = be32_to_cpup(q->qpage + idx);
	xmon_printf("%s idx=%d T=%d %08x %08x ...", name,
		    q->idx, q->toggle, i0, i1);
}

notrace void xmon_xive_do_dump(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);

	xmon_printf("CPU %d:", cpu);
	if (xc) {
		xmon_printf("pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr);

#ifdef CONFIG_SMP
		{
			u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);

			xmon_printf("IPI=0x%08x PQ=%c%c ", xc->hw_ipi,
				    val & XIVE_ESB_VAL_P ? 'P' : '-',
				    val & XIVE_ESB_VAL_Q ? 'Q' : '-');
		}
#endif
		xive_dump_eq("EQ", &xc->queue[xive_irq_priority]);
	}
	xmon_printf("\n");
}

int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
{
	int rc;
	u32 target;
	u8 prio;
	u32 lirq;

	rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
	if (rc) {
		xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
		return rc;
	}

	xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
		    hw_irq, target, prio, lirq);

	if (d) {
		struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
		u64 val = xive_esb_read(xd, XIVE_ESB_GET);

		xmon_printf("PQ=%c%c",
			    val & XIVE_ESB_VAL_P ? 'P' : '-',
			    val & XIVE_ESB_VAL_Q ? 'Q' : '-');
	}

	xmon_printf("\n");
	return 0;
}

#endif /* CONFIG_XMON */

static unsigned int xive_get_irq(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	u32 irq;

	/*
	 * This can be called either as a result of a HW interrupt or
	 * as a "replay" because the EOI path decided there was still
	 * something in one of the queues.
	 *
	 * First ask the backend to update our mask of pending
	 * priorities. This also has the effect of updating the CPPR
	 * to the most favored pending interrupt.
	 */
	xive_ops->update_pending(xc);

	DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio);

	/* Scan our queue(s) for interrupts */
	irq = xive_scan_interrupts(xc, false);

	DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n",
		    irq, xc->pending_prio);

	/* Return pending interrupt if any */
	if (irq == XIVE_BAD_IRQ)
		return 0;
	return irq;
}
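/*
 * After EOI'ing an interrupt, re-check the queues: multiple
 * interrupts can coalesce into a single CPU notification, so if
 * anything is still pending we ask Linux to replay an external
 * interrupt rather than losing it.
 */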
static void xive_do_queue_eoi(struct xive_cpu *xc)
{
	if (xive_scan_interrupts(xc, true) != 0) {
		DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio);
		force_external_irq_replay();
	}
}
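/*
 * EOI an interrupt at the source. There are several methods to do
 * this depending on the HW version and source type: a "store EOI"
 * MMIO, a firmware call, or an ESB load that clears the PQ bits
 * (with a manual re-trigger if Q was set).
 */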
static void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
{
	xd->stale_p = false;
	/* If the source supports the "store EOI" facility, use it */
	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
		xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
		/*
		 * The FW told us to call it. This happens for some
		 * interrupt sources that need additional HW whacking
		 * beyond the ESB manipulation.
		 */
		if (WARN_ON_ONCE(!xive_ops->eoi))
			return;
		xive_ops->eoi(hw_irq);
	} else {
		u8 eoi_val;

		/*
		 * Otherwise, we use the special MMIO that does a clear
		 * of both P and Q and returns the old Q. This allows us
		 * to then do a re-trigger if Q was set, rather than
		 * synthesizing an interrupt in software.
		 *
		 * For LSIs we use the HW "EOI cycle" special load
		 * instead, as LSIs are automatically re-triggered by
		 * the HW when still pending.
		 */
		if (xd->flags & XIVE_IRQ_FLAG_LSI)
			xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
		else {
			eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
			DBG_VERBOSE("eoi_val=%x\n", eoi_val);

			/* Re-trigger if needed */
			if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
				out_be64(xd->trig_mmio, 0);
		}
	}
}

/* irq_chip eoi callback */
static void xive_irq_eoi(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
		    d->irq, irqd_to_hwirq(d), xc->pending_prio);

	/*
	 * EOI the source if it hasn't been disabled and hasn't
	 * been passed-through to a KVM guest.
	 */
	if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d) &&
	    !(xd->flags & XIVE_IRQ_NO_EOI))
		xive_do_source_eoi(irqd_to_hwirq(d), xd);
	else
		xd->stale_p = true;

	/*
	 * Clear saved_p to indicate that the interrupt no longer
	 * occupies a slot on the target queue.
	 */
	xd->saved_p = false;

	/* Check for more work in the queue */
	xive_do_queue_eoi(xc);
}
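/*
 * Helper used to mask and unmask an interrupt source.
 *
 * Masking sets the ESB to PQ=01 (disabled). Unmasking restores
 * PQ=10 if a pending event was latched while masked (saved_p), so
 * the event is replayed on the next EOI, and PQ=00 otherwise.
 * Not used for sources that require firmware masking.
 */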
static void xive_do_source_set_mask(struct xive_irq_data *xd,
				    bool mask)
{
	u64 val;

	/*
	 * If the interrupt had P set, it may be in a queue.
	 *
	 * We must not re-enable it until it has been fetched from
	 * that queue and EOId, so we keep a copy of that P state and
	 * use it to restore the ESB to PQ=10 rather than PQ=00 when
	 * re-enabling. The interrupt is then effectively re-enabled
	 * when it is next EOId.
	 */
	if (mask) {
		val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
		if (!xd->stale_p && !!(val & XIVE_ESB_VAL_P))
			xd->saved_p = true;
		xd->stale_p = false;
	} else if (xd->saved_p) {
		xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
		xd->saved_p = false;
	} else {
		xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
		xd->stale_p = false;
	}
}
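/*
 * Try to choose "cpu" as a new interrupt target. Increments the
 * queue accounting for that target if the queue isn't already
 * full.
 */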
static bool xive_try_pick_target(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	struct xive_q *q = &xc->queue[xive_irq_priority];
	int max;

	/*
	 * Calculate the max number of interrupts for that queue,
	 * leaving a gap of one entry so the queue never completely
	 * fills up.
	 */
	max = (q->msk + 1) - 1;
	return !!atomic_add_unless(&q->count, 1, max);
}
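/*
 * Un-account an interrupt for a target CPU. We don't directly
 * decrement q->count since the interrupt might still be present
 * in the queue; instead we increment a separate "pending_count",
 * which is subtracted from "count" once the owning CPU observes
 * the queue to be empty (see xive_scan_interrupts()).
 */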
static void xive_dec_target_count(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	struct xive_q *q = &xc->queue[xive_irq_priority];

	if (WARN_ON(cpu < 0 || !xc)) {
		pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc);
		return;
	}

	/*
	 * Increment the "pending count"; it will be folded into the
	 * target queue count once that queue is observed empty by
	 * the owning CPU. See the matching code in
	 * xive_scan_interrupts().
	 */
	atomic_inc(&q->pending_count);
}

/* Find a tentative CPU target in a CPU mask */
static int xive_find_target_in_mask(const struct cpumask *mask,
				    unsigned int fuzz)
{
	int cpu, first, num, i;

	/* Pick a starting point CPU in the mask based on fuzz */
	num = min_t(int, cpumask_weight(mask), nr_cpu_ids);
	first = fuzz % num;

	/* Locate it */
	cpu = cpumask_first(mask);
	for (i = 0; i < first && cpu < nr_cpu_ids; i++)
		cpu = cpumask_next(cpu, mask);

	/* Sanity check */
	if (WARN_ON(cpu >= nr_cpu_ids))
		cpu = cpumask_first(cpu_online_mask);

	/* Remember the first one to check for wrap-around */
	first = cpu;

	/*
	 * Now go through the entire mask until we find a valid
	 * target.
	 */
	do {
		/*
		 * Re-check online, as the fallback case passes us an
		 * untested affinity mask.
		 */
		if (cpu_online(cpu) && xive_try_pick_target(cpu))
			return cpu;
		cpu = cpumask_next(cpu, mask);

		/* Wrap around */
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(mask);
	} while (cpu != first);

	return -1;
}
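/*
 * Pick a target CPU for an interrupt. This is done at startup or
 * if the affinity is changed in a way that invalidates the current
 * target. CPUs on the same chip as the source are preferred when
 * chip IDs are available.
 */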
static int xive_pick_irq_target(struct irq_data *d,
				const struct cpumask *affinity)
{
	static unsigned int fuzz;
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	cpumask_var_t mask;
	int cpu = -1;

	/*
	 * If we have chip IDs, first try to build a mask of CPUs
	 * matching the source chip and find a target in there.
	 */
	if (xd->src_chip != XIVE_INVALID_CHIP_ID &&
	    zalloc_cpumask_var(&mask, GFP_ATOMIC)) {
		/* Build a mask of matching chip IDs */
		for_each_cpu_and(cpu, affinity, cpu_online_mask) {
			struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
			if (xc->chip_id == xd->src_chip)
				cpumask_set_cpu(cpu, mask);
		}
		/* Try to find a target */
		if (cpumask_empty(mask))
			cpu = -1;
		else
			cpu = xive_find_target_in_mask(mask, fuzz++);
		free_cpumask_var(mask);
		if (cpu >= 0)
			return cpu;
		fuzz--;
	}

	/* No chip IDs or no match, fall back to the full affinity mask */
	return xive_find_target_in_mask(affinity, fuzz++);
}

static unsigned int xive_irq_startup(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int target, rc;

	xd->saved_p = false;
	xd->stale_p = false;
	pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n",
		 d->irq, hw_irq, d);

#ifdef CONFIG_PCI_MSI
	/*
	 * The generic MSI code returns with the interrupt disabled
	 * on the card, using the MSI mask bits. Firmware doesn't
	 * appear to unmask at that level, so we do it here by hand.
	 */
	if (irq_data_get_msi_desc(d))
		pci_msi_unmask_irq(d);
#endif

	/* Pick a target */
	target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d));
	if (target == XIVE_INVALID_TARGET) {
		/* Try again breaking affinity */
		target = xive_pick_irq_target(d, cpu_online_mask);
		if (target == XIVE_INVALID_TARGET)
			return -ENXIO;
		pr_warn("irq %d started with broken affinity\n", d->irq);
	}

	/* Sanity check */
	if (WARN_ON(target == XIVE_INVALID_TARGET ||
		    target >= nr_cpu_ids))
		target = smp_processor_id();

	xd->target = target;

	/*
	 * Configure the logical number to be the Linux IRQ number
	 * and set the target queue.
	 */
	rc = xive_ops->configure_irq(hw_irq,
				     get_hard_smp_processor_id(target),
				     xive_irq_priority, d->irq);
	if (rc)
		return rc;

	/* Unmask the ESB */
	xive_do_source_set_mask(xd, false);

	return 0;
}

static void xive_irq_shutdown(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

	pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n",
		 d->irq, hw_irq, d);

	if (WARN_ON(xd->target == XIVE_INVALID_TARGET))
		return;

	/* Mask the interrupt at the source */
	xive_do_source_set_mask(xd, true);

	/*
	 * Mask the interrupt in HW as well and set its logical
	 * number to the "bad" IRQ number.
	 */
	xive_ops->configure_irq(hw_irq,
				get_hard_smp_processor_id(xd->target),
				0xff, XIVE_BAD_IRQ);

	xive_dec_target_count(xd->target);
	xd->target = XIVE_INVALID_TARGET;
}

static void xive_irq_unmask(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd);

	/*
	 * Some sources (flagged XIVE_IRQ_FLAG_MASK_FW) must be
	 * masked/unmasked via a firmware call rather than by toggling
	 * the ESB PQ bits, so reconfigure them through the backend.
	 */
	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
		xive_ops->configure_irq(hw_irq,
					get_hard_smp_processor_id(xd->target),
					xive_irq_priority, d->irq);
		return;
	}

	xive_do_source_set_mask(xd, false);
}

static void xive_irq_mask(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd);

	/* Same deal: firmware-masked sources go through the backend */
	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
		xive_ops->configure_irq(hw_irq,
					get_hard_smp_processor_id(xd->target),
					0xff, d->irq);
		return;
	}

	xive_do_source_set_mask(xd, true);
}

static int xive_irq_set_affinity(struct irq_data *d,
				 const struct cpumask *cpumask,
				 bool force)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	u32 target, old_target;
	int rc = 0;

	pr_devel("xive_irq_set_affinity: irq %d\n", d->irq);

	/* Is this valid ? */
	if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
		return -EINVAL;

	/* Don't do anything if the interrupt isn't started */
	if (!irqd_is_started(d))
		return IRQ_SET_MASK_OK;

	/*
	 * If the existing target is already in the new mask and is
	 * online, do nothing.
	 */
	if (xd->target != XIVE_INVALID_TARGET &&
	    cpu_online(xd->target) &&
	    cpumask_test_cpu(xd->target, cpumask))
		return IRQ_SET_MASK_OK;

	/* Pick a new target */
	target = xive_pick_irq_target(d, cpumask);

	/* No target found */
	if (target == XIVE_INVALID_TARGET)
		return -ENXIO;

	/* Sanity check */
	if (WARN_ON(target >= nr_cpu_ids))
		target = smp_processor_id();

	old_target = xd->target;

	/*
	 * Only configure the irq if it's not currently passed-through
	 * to a KVM guest.
	 */
	if (!irqd_is_forwarded_to_vcpu(d))
		rc = xive_ops->configure_irq(hw_irq,
					     get_hard_smp_processor_id(target),
					     xive_irq_priority, d->irq);
	if (rc < 0) {
		pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
		return rc;
	}

	pr_devel(" target: 0x%x\n", target);
	xd->target = target;

	/* Give up the previous target */
	if (old_target != XIVE_INVALID_TARGET)
		xive_dec_target_count(old_target);

	return IRQ_SET_MASK_OK;
}

static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/*
	 * We only support edge-rising and level-low. Setting the type
	 * has no effect on the HW itself; it only updates the
	 * descriptor bits, which in turn affect how a retrigger is
	 * performed. Default to edge, matching the domain map code.
	 */
	if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_EDGE_RISING;

	if (flow_type != IRQ_TYPE_EDGE_RISING &&
	    flow_type != IRQ_TYPE_LEVEL_LOW)
		return -EINVAL;

	irqd_set_trigger_type(d, flow_type);

	/*
	 * An LSI should be reported by firmware as level and an MSI
	 * as edge. If the requested trigger doesn't match what the
	 * FW says, warn: the interrupt will probably misbehave.
	 */
	if ((flow_type == IRQ_TYPE_LEVEL_LOW) !=
	    !!(xd->flags & XIVE_IRQ_FLAG_LSI)) {
		pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n",
			d->irq, (u32)irqd_to_hwirq(d),
			(flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge",
			(xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge");
	}

	return IRQ_SET_MASK_OK_NOCOPY;
}

static int xive_irq_retrigger(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/* This should only be used for MSIs */
	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
		return 0;

	/*
	 * To perform a retrigger, we first set the PQ bits to
	 * 11, then perform an EOI.
	 */
	xive_esb_read(xd, XIVE_ESB_SET_PQ_11);

	/*
	 * Note: we pass "0" as the hw_irq argument in order to avoid
	 * calling into the backend EOI code, which we don't want to
	 * do for a re-trigger. Backends typically only do EOI for
	 * LSIs anyway.
	 */
	xive_do_source_eoi(0, xd);

	return 1;
}
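/*
 * Called by KVM with state non-NULL to enable interrupt
 * pass-through to a vCPU, or with state NULL to return the
 * interrupt to host control.
 */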
static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int rc;
	u8 pq;

	/*
	 * We only support this on interrupts that do not require
	 * firmware calls for masking and unmasking.
	 */
	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW)
		return -EIO;

	/*
	 * state non-NULL means enable pass-through, NULL means
	 * disable it.
	 */
	if (state) {
		irqd_set_forwarded_to_vcpu(d);

		/* Set it to PQ=10 state to prevent further sends */
		pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
		if (!xd->stale_p) {
			xd->saved_p = !!(pq & XIVE_ESB_VAL_P);
			xd->stale_p = !xd->saved_p;
		}

		/* No target ? nothing to do */
		if (xd->target == XIVE_INVALID_TARGET) {
			/*
			 * An untargetted interrupt should have been
			 * also masked at the source.
			 */
			WARN_ON(xd->saved_p);

			return 0;
		}

		/*
		 * If P was set, adjust the state to PQ=11 to indicate
		 * that a resend is needed for the interrupt to reach
		 * the guest, and remember the value of P.
		 *
		 * A set P also tells us the interrupt is in flight to
		 * a host queue, or has been fetched but not yet EOIed
		 * by the host, i.e. it is potentially using up a host
		 * queue slot. As long as that is the case, we must not
		 * hard-unmask it when "returning" the interrupt to the
		 * host; saved_p is cleared by the host EOI, when we
		 * know for sure the queue slot is no longer in use.
		 */
		if (xd->saved_p) {
			xive_esb_read(xd, XIVE_ESB_SET_PQ_11);

			/*
			 * Sync the XIVE source HW to ensure the
			 * interrupt has fully gone through the queueing
			 * path before the state change takes effect, so
			 * that PQ=11 really means "queued somewhere in
			 * the host".
			 */
			if (xive_ops->sync_source)
				xive_ops->sync_source(hw_irq);
		}
	} else {
		irqd_clr_forwarded_to_vcpu(d);

		/* No host target ? just mask and return */
		if (xd->target == XIVE_INVALID_TARGET) {
			xive_do_source_set_mask(xd, true);
			return 0;
		}

		/*
		 * Sync the XIVE source HW so that any in-flight guest
		 * events have settled before we reconfigure the
		 * interrupt for the host.
		 */
		if (xive_ops->sync_source)
			xive_ops->sync_source(hw_irq);

		/*
		 * At this point the interrupt is in a PQ=10 or PQ=11
		 * state, i.e. it won't fire and will have latched in Q
		 * whether there's a pending HW event or not.
		 *
		 * First re-route it to its host target.
		 */
		rc = xive_ops->configure_irq(hw_irq,
					     get_hard_smp_processor_id(xd->target),
					     xive_irq_priority, d->irq);
		if (rc)
			return rc;

		/*
		 * Then, if saved_p is not set, effectively re-enable
		 * the interrupt with an EOI, which will also
		 * re-trigger it if Q was latched.
		 *
		 * If saved_p is set, the interrupt still occupies a
		 * host queue slot, so we leave it masked (PQ=10); the
		 * EOI done by the host when it is eventually fetched
		 * will take care of re-enabling it.
		 */
		if (!xd->saved_p)
			xive_do_source_eoi(hw_irq, xd);
	}
	return 0;
}

static int xive_get_irqchip_state(struct irq_data *data,
				  enum irqchip_irq_state which, bool *state)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(data);

	switch (which) {
	case IRQCHIP_STATE_ACTIVE:
		/*
		 * An interrupt is "active" while in flight: either
		 * still pending at the source (ESB "P" bit set) or
		 * occupying a queue slot (saved_p), unless the
		 * recorded P state is known to be stale.
		 */
		*state = !xd->stale_p &&
			 (xd->saved_p ||
			  !!(xive_esb_read(xd, XIVE_ESB_GET) & XIVE_ESB_VAL_P));
		return 0;
	default:
		return -EINVAL;
	}
}

static struct irq_chip xive_irq_chip = {
	.name = "XIVE-IRQ",
	.irq_startup = xive_irq_startup,
	.irq_shutdown = xive_irq_shutdown,
	.irq_eoi = xive_irq_eoi,
	.irq_mask = xive_irq_mask,
	.irq_unmask = xive_irq_unmask,
	.irq_set_affinity = xive_irq_set_affinity,
	.irq_set_type = xive_irq_set_type,
	.irq_retrigger = xive_irq_retrigger,
	.irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
	.irq_get_irqchip_state = xive_get_irqchip_state,
};

bool is_xive_irq(struct irq_chip *chip)
{
	return chip == &xive_irq_chip;
}
EXPORT_SYMBOL_GPL(is_xive_irq);

void xive_cleanup_irq_data(struct xive_irq_data *xd)
{
	if (xd->eoi_mmio) {
		iounmap(xd->eoi_mmio);
		/* EOI and trigger pages may share a single mapping */
		if (xd->eoi_mmio == xd->trig_mmio)
			xd->trig_mmio = NULL;
		xd->eoi_mmio = NULL;
	}
	if (xd->trig_mmio) {
		iounmap(xd->trig_mmio);
		xd->trig_mmio = NULL;
	}
}
EXPORT_SYMBOL_GPL(xive_cleanup_irq_data);

static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
{
	struct xive_irq_data *xd;
	int rc;

	xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL);
	if (!xd)
		return -ENOMEM;
	rc = xive_ops->populate_irq_data(hw, xd);
	if (rc) {
		kfree(xd);
		return rc;
	}
	xd->target = XIVE_INVALID_TARGET;
	irq_set_handler_data(virq, xd);

	return 0;
}

static void xive_irq_free_data(unsigned int virq)
{
	struct xive_irq_data *xd = irq_get_handler_data(virq);

	if (!xd)
		return;
	irq_set_handler_data(virq, NULL);
	xive_cleanup_irq_data(xd);
	kfree(xd);
}

#ifdef CONFIG_SMP

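/*
 * Trigger an IPI on a target CPU by writing to the trigger page
 * of that CPU's per-CPU IPI source.
 */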
static void xive_cause_ipi(int cpu)
{
	struct xive_cpu *xc;
	struct xive_irq_data *xd;

	xc = per_cpu(xive_cpu, cpu);

	DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n",
		    smp_processor_id(), cpu, xc->hw_ipi);

	xd = &xc->ipi_data;
	if (WARN_ON(!xd->trig_mmio))
		return;
	out_be64(xd->trig_mmio, 0);
}

static irqreturn_t xive_muxed_ipi_action(int irq, void *dev_id)
{
	return smp_ipi_demux();
}

static void xive_ipi_eoi(struct irq_data *d)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	/* Handle possible race with unplug and drop stale IPIs */
	if (!xc)
		return;

	DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
		    d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);

	xive_do_source_eoi(xc->hw_ipi, &xc->ipi_data);
	xive_do_queue_eoi(xc);
}

static void xive_ipi_do_nothing(struct irq_data *d)
{
	/*
	 * Nothing to do: we never mask/unmask IPIs, but the callbacks
	 * have to exist for the generic IRQ code.
	 */
}

static struct irq_chip xive_ipi_chip = {
	.name = "XIVE-IPI",
	.irq_eoi = xive_ipi_eoi,
	.irq_mask = xive_ipi_do_nothing,
	.irq_unmask = xive_ipi_do_nothing,
};

static void __init xive_request_ipi(void)
{
	unsigned int virq;

	/*
	 * Initialization failed: move on, we might still get far
	 * enough to display our errors before the system falls over.
	 */
	if (!xive_irq_domain)
		return;

	/* Map the IPI (HW interrupt 0 by convention, see domain map) */
	virq = irq_create_mapping(xive_irq_domain, 0);
	xive_ipi_irq = virq;

	WARN_ON(request_irq(virq, xive_muxed_ipi_action,
			    IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
}

static int xive_setup_cpu_ipi(unsigned int cpu)
{
	struct xive_cpu *xc;
	int rc;

	pr_debug("Setting up IPI for CPU %d\n", cpu);

	xc = per_cpu(xive_cpu, cpu);

	/* Check if we are already setup */
	if (xc->hw_ipi != 0)
		return 0;

	/* Grab an IPI from the backend, this will populate xc->hw_ipi */
	if (xive_ops->get_ipi(cpu, xc))
		return -EIO;

	/*
	 * Populate the IRQ data in the xive_cpu structure and
	 * configure the HW / enable the IPIs.
	 */
	rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data);
	if (rc) {
		pr_err("Failed to populate IPI data on CPU %d\n", cpu);
		return -EIO;
	}
	rc = xive_ops->configure_irq(xc->hw_ipi,
				     get_hard_smp_processor_id(cpu),
				     xive_irq_priority, xive_ipi_irq);
	if (rc) {
		pr_err("Failed to map IPI CPU %d\n", cpu);
		return -EIO;
	}
	pr_devel("CPU %d HW IPI %x, virq %d, trig_mmio=%p\n", cpu,
		 xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);

	/* Unmask it */
	xive_do_source_set_mask(&xc->ipi_data, false);

	return 0;
}

static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	/* Disable the IPI and free the IRQ data */

	/* Already cleaned up ? */
	if (xc->hw_ipi == 0)
		return;

	/* Mask the IPI */
	xive_do_source_set_mask(&xc->ipi_data, true);

	/*
	 * Note: we don't call xive_cleanup_irq_data() to free the
	 * mappings, as this can be called from an IPI during kexec,
	 * which is not a safe environment for iounmap().
	 */

	/* Deconfigure/mask in the backend */
	xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(),
				0xff, xive_ipi_irq);

	/* Free the IPI in the backend */
	xive_ops->put_ipi(cpu, xc);
}

void __init xive_smp_probe(void)
{
	smp_ops->cause_ipi = xive_cause_ipi;

	/* Register the IPI */
	xive_request_ipi();

	/* Allocate and setup the IPI for the boot CPU */
	xive_setup_cpu_ipi(smp_processor_id());
}

#endif /* CONFIG_SMP */

static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
			       irq_hw_number_t hw)
{
	int rc;

	/*
	 * Mark interrupts as edge sensitive by default so that resend
	 * actually works. This is fixed up in xive_irq_set_type() if
	 * needed.
	 */
	irq_clear_status_flags(virq, IRQ_LEVEL);

#ifdef CONFIG_SMP
	/* IPIs are special and come up with HW number 0 */
	if (hw == 0) {
		/*
		 * IPIs are marked per-cpu. We use separate HW
		 * interrupts under the hood but associated with the
		 * same "linux" interrupt.
		 */
		irq_set_chip_and_handler(virq, &xive_ipi_chip,
					 handle_percpu_irq);
		return 0;
	}
#endif

	rc = xive_irq_alloc_data(virq, hw);
	if (rc)
		return rc;

	irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq);

	return 0;
}

static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
{
	struct irq_data *data = irq_get_irq_data(virq);
	unsigned int hw_irq;

	if (!data)
		return;
	hw_irq = (unsigned int)irqd_to_hwirq(data);
	/* The IPI (HW number 0) has no per-interrupt data to free */
	if (hw_irq)
		xive_irq_free_data(virq);
}

static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct,
				 const u32 *intspec, unsigned int intsize,
				 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	*out_hwirq = intspec[0];

	/*
	 * If intsize is at least 2, we look for the type in the
	 * second cell: we support "edge" and "level" (aka "low"),
	 * and default to level.
	 */
	if (intsize > 1) {
		if (intspec[1] & 1)
			*out_flags = IRQ_TYPE_LEVEL_LOW;
		else
			*out_flags = IRQ_TYPE_EDGE_RISING;
	} else
		*out_flags = IRQ_TYPE_LEVEL_LOW;

	return 0;
}

static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node,
				 enum irq_domain_bus_token bus_token)
{
	return xive_ops->match(node);
}

static const struct irq_domain_ops xive_irq_domain_ops = {
	.match = xive_irq_domain_match,
	.map = xive_irq_domain_map,
	.unmap = xive_irq_domain_unmap,
	.xlate = xive_irq_domain_xlate,
};

static void __init xive_init_host(void)
{
	xive_irq_domain = irq_domain_add_nomap(NULL, XIVE_MAX_IRQ,
					       &xive_irq_domain_ops, NULL);
	if (WARN_ON(xive_irq_domain == NULL))
		return;
	irq_set_default_host(xive_irq_domain);
}

static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
	if (xc->queue[xive_irq_priority].qpage)
		xive_ops->cleanup_queue(cpu, xc, xive_irq_priority);
}

static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
	int rc = 0;

	/* We setup a single queue, at our one and only priority */
	if (!xc->queue[xive_irq_priority].qpage)
		rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority);

	return rc;
}

static int xive_prepare_cpu(unsigned int cpu)
{
	struct xive_cpu *xc;

	xc = per_cpu(xive_cpu, cpu);
	if (!xc) {
		struct device_node *np;

		/* First time through: allocate the per-CPU structure */
		xc = kzalloc_node(sizeof(struct xive_cpu),
				  GFP_KERNEL, cpu_to_node(cpu));
		if (!xc)
			return -ENOMEM;
		np = of_get_cpu_node(cpu, NULL);
		if (np)
			xc->chip_id = of_get_ibm_chip_id(np);
		of_node_put(np);

		per_cpu(xive_cpu, cpu) = xc;
	}

	/* Setup the EQs if not already done */
	return xive_setup_cpu_queues(cpu, xc);
}

static void xive_setup_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	/* The backend might have additional things to do */
	if (xive_ops->setup_cpu)
		xive_ops->setup_cpu(smp_processor_id(), xc);

	/* Set CPPR to 0xff to enable the flow of interrupts */
	xc->cppr = 0xff;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}

#ifdef CONFIG_SMP
void xive_smp_setup_cpu(void)
{
	pr_devel("SMP setup CPU %d\n", smp_processor_id());

	/* This will have already been done on the boot CPU */
	if (smp_processor_id() != boot_cpuid)
		xive_setup_cpu();
}

int xive_smp_prepare_cpu(unsigned int cpu)
{
	int rc;

	/* Allocate per-CPU data and queues */
	rc = xive_prepare_cpu(cpu);
	if (rc)
		return rc;

	/* Allocate and setup the IPI for the new CPU */
	return xive_setup_cpu_ipi(cpu);
}

#ifdef CONFIG_HOTPLUG_CPU
static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
{
	u32 irq;

	/* We assume local irqs are disabled */
	WARN_ON(!irqs_disabled());

	/* Check what's already in the CPU queue */
	while ((irq = xive_scan_interrupts(xc, false)) != 0) {
		/*
		 * We need to re-route that interrupt to its new
		 * destination. First get and lock the descriptor.
		 */
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *d = irq_desc_get_irq_data(desc);
		struct xive_irq_data *xd;
		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

		/*
		 * Ignore anything that isn't a XIVE irq and ignore
		 * IPIs: those can simply be dropped.
		 */
		if (d->domain != xive_irq_domain || hw_irq == 0)
			continue;

		/*
		 * The IRQ should have already been re-routed; this is
		 * just a stale entry in the old queue, so re-trigger
		 * it in order to make it reach its new destination.
		 */
#ifdef DEBUG_FLUSH
		pr_info("CPU %d: Got irq %d while offline, re-sending...\n",
			cpu, irq);
#endif
		raw_spin_lock(&desc->lock);
		xd = irq_desc_get_handler_data(desc);

		/* Clear saved_p: it's no longer pending on this queue */
		xd->saved_p = false;

		/*
		 * For LSIs, we EOI, which will cause a resend if the
		 * line is still asserted. For MSIs, we do a retrigger.
		 */
		if (xd->flags & XIVE_IRQ_FLAG_LSI)
			xive_do_source_eoi(irqd_to_hwirq(d), xd);
		else
			xive_irq_retrigger(d);

		raw_spin_unlock(&desc->lock);
	}
}

void xive_smp_disable_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Migrate interrupts away from this CPU */
	irq_migrate_all_off_this_cpu();

	/* Set CPPR to 0 to disable the flow of interrupts */
	xc->cppr = 0;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

	/* Flush everything still in the queue */
	xive_flush_cpu_queue(cpu, xc);

	/* Re-enable CPPR */
	xc->cppr = 0xff;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}

void xive_flush_interrupt(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Called if an interrupt occurs while the CPU is hot unplugged */
	xive_flush_cpu_queue(cpu, xc);
}

#endif /* CONFIG_HOTPLUG_CPU */

#endif /* CONFIG_SMP */

void xive_teardown_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Set CPPR to 0 to disable the flow of interrupts */
	xc->cppr = 0;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

	if (xive_ops->teardown_cpu)
		xive_ops->teardown_cpu(cpu, xc);

#ifdef CONFIG_SMP
	/* Get rid of the IPI */
	xive_cleanup_cpu_ipi(cpu, xc);
#endif

	/* Disable and free the queues */
	xive_cleanup_cpu_queues(cpu, xc);
}

void xive_shutdown(void)
{
	xive_ops->shutdown();
}

bool __init xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
			   u8 max_prio)
{
	xive_tima = area;
	xive_tima_offset = offset;
	xive_ops = ops;
	xive_irq_priority = max_prio;

	ppc_md.get_irq = xive_get_irq;
	__xive_enabled = true;

	pr_devel("Initializing host..\n");
	xive_init_host();

	pr_devel("Initializing boot CPU..\n");

	/* Allocate per-CPU data and queues */
	xive_prepare_cpu(smp_processor_id());

	/* Get ready for interrupts */
	xive_setup_cpu();

	pr_info("Interrupt handling initialized with %s backend\n",
		xive_ops->name);
	pr_info("Using priority %d for all interrupts\n", max_prio);

	return true;
}
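/*
 * Allocate a zeroed, node-local event queue page for a CPU. The
 * queue holds (1 << queue_shift) bytes, i.e. (1 << queue_shift) / 4
 * 32-bit entries; e.g. queue_shift = 16 gives a 64KB queue of
 * 16384 entries.
 */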
__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
{
	unsigned int alloc_order;
	struct page *pages;
	__be32 *qpage;

	alloc_order = xive_alloc_order(queue_shift);
	pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
	if (!pages)
		return ERR_PTR(-ENOMEM);
	qpage = (__be32 *)page_address(pages);
	memset(qpage, 0, 1 << queue_shift);

	return qpage;
}

static int __init xive_off(char *arg)
{
	xive_cmdline_disabled = true;
	return 0;
}
__setup("xive=off", xive_off);