// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */
#define pr_fmt(fmt) "GICv3: " fmt

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/refcount.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-common.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/irq-partition-percpu.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"

#define GICD_INT_NMI_PRI (GICD_INT_DEF_PRI & ~0x80)

#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1)

#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)

struct redist_region {
        void __iomem *redist_base;
        phys_addr_t phys_base;
        bool single_redist;
};

struct gic_chip_data {
        struct fwnode_handle *fwnode;
        void __iomem *dist_base;
        struct redist_region *redist_regions;
        struct rdists rdists;
        struct irq_domain *domain;
        u64 redist_stride;
        u32 nr_redist_regions;
        u64 flags;
        bool has_rss;
        unsigned int ppi_nr;
        struct partition_desc **ppi_descs;
};

static struct gic_chip_data gic_data __read_mostly;
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);

#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
#define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
#define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)

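/*
 * Enabled when the system uses interrupt priority masking to emulate
 * NMIs (pseudo-NMIs) on top of the GIC priority scheme; gated on
 * CONFIG_ARM64_PSEUDO_NMI in gic_supports_nmi().
 */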
static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);

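/*
 * Set when a write to ICC_PMR_EL1 that unmasks interrupts must be
 * followed by a DSB before it is guaranteed to take effect (i.e. when
 * ICC_CTLR_EL1.PMHE is set). Exported because the PMR synchronisation
 * helpers are inlined into code that can live in modules.
 */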
DEFINE_STATIC_KEY_FALSE(gic_pmr_sync);
EXPORT_SYMBOL(gic_pmr_sync);

DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
EXPORT_SYMBOL(gic_nonsecure_priorities);

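/*
 * When GIC security is enabled (GICD_CTLR.DS == 0), the distributor
 * presents Non-secure software with a shifted view of the priorities it
 * programs: prio -> 0x80 | (prio >> 1). If the Non-secure world also
 * owns Group 0 (SCR_EL3.FIQ == 0), ICC_RPR_EL1 returns that distributor
 * view, so priorities written by software must be shifted the same way
 * before being compared against the running priority.
 */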
#define GICD_INT_RPR_PRI(priority)                                      \
        ({                                                              \
                u32 __priority = (priority);                            \
                if (static_branch_unlikely(&gic_nonsecure_priorities))  \
                        __priority = 0x80 | (__priority >> 1);          \
                                                                        \
                __priority;                                             \
        })

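/* ppi_nmi_refs[n] counts the requesters that use PPI n as an NMI */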
static refcount_t *ppi_nmi_refs;

static struct gic_kvm_info gic_v3_kvm_info __initdata;
static DEFINE_PER_CPU(bool, has_rss);

#define MPIDR_RS(mpidr) (((mpidr) & 0xF0UL) >> 4)
#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)

/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE 0xf0

enum gic_intid_range {
        SGI_RANGE,
        PPI_RANGE,
        SPI_RANGE,
        EPPI_RANGE,
        ESPI_RANGE,
        LPI_RANGE,
        __INVALID_RANGE__
};

static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
{
        switch (hwirq) {
        case 0 ... 15:
                return SGI_RANGE;
        case 16 ... 31:
                return PPI_RANGE;
        case 32 ... 1019:
                return SPI_RANGE;
        case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63):
                return EPPI_RANGE;
        case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023):
                return ESPI_RANGE;
        case 8192 ... GENMASK(23, 0):
                return LPI_RANGE;
        default:
                return __INVALID_RANGE__;
        }
}

static enum gic_intid_range get_intid_range(struct irq_data *d)
{
        return __get_intid_range(d->hwirq);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
        return d->hwirq;
}

static inline bool gic_irq_in_rdist(struct irq_data *d)
{
        switch (get_intid_range(d)) {
        case SGI_RANGE:
        case PPI_RANGE:
        case EPPI_RANGE:
                return true;
        default:
                return false;
        }
}

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
        switch (get_intid_range(d)) {
        case SGI_RANGE:
        case PPI_RANGE:
        case EPPI_RANGE:
                /* SGI+PPI -> SGI_base for this CPU */
                return gic_data_rdist_sgi_base();

        case SPI_RANGE:
        case ESPI_RANGE:
                /* SPI -> dist_base */
                return gic_data.dist_base;

        default:
                return NULL;
        }
}

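/*
 * Spin until the (re)distributor has drained a register write, i.e.
 * until GICD_CTLR.RWP (or its GICR equivalent) clears, with a bounded
 * timeout so a wedged GIC cannot hang the CPU forever.
 */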
static void gic_do_wait_for_rwp(void __iomem *base)
{
        u32 count = 1000000;    /* 1s! */

        while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
                count--;
                if (!count) {
                        pr_err_ratelimited("RWP timeout, gone fishing\n");
                        return;
                }
                cpu_relax();
                udelay(1);
        }
}

/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
        gic_do_wait_for_rwp(gic_data.dist_base);
}

/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
        gic_do_wait_for_rwp(gic_data_rdist_rd_base());
}

#ifdef CONFIG_ARM64

static u64 __maybe_unused gic_read_iar(void)
{
        if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
                return gic_read_iar_cavium_thunderx();
        else
                return gic_read_iar_common();
}
#endif

static void gic_enable_redist(bool enable)
{
        void __iomem *rbase;
        u32 count = 1000000;    /* 1s! */
        u32 val;

        if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
                return;

        rbase = gic_data_rdist_rd_base();

        val = readl_relaxed(rbase + GICR_WAKER);
        if (enable)
                /* Wake up this CPU redistributor */
                val &= ~GICR_WAKER_ProcessorSleep;
        else
                val |= GICR_WAKER_ProcessorSleep;
        writel_relaxed(val, rbase + GICR_WAKER);

        if (!enable) {          /* Check that GICR_WAKER is writeable */
                val = readl_relaxed(rbase + GICR_WAKER);
                if (!(val & GICR_WAKER_ProcessorSleep))
                        return; /* No PM support in this redistributor */
        }

        while (--count) {
                val = readl_relaxed(rbase + GICR_WAKER);
                if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
                        break;
                cpu_relax();
                udelay(1);
        }
        if (!count)
                pr_err_ratelimited("redistributor failed to %s...\n",
                                   enable ? "wakeup" : "sleep");
}

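/*
 * Routines to disable, enable, EOI and route interrupts. Interrupts in
 * the extended SGI/PPI/SPI ranges live in separate "E" registers, so
 * convert_offset_index() maps a (register, INTID) pair onto the right
 * register offset and bit index for both the regular and extended
 * ranges.
 */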
static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
{
        switch (get_intid_range(d)) {
        case SGI_RANGE:
        case PPI_RANGE:
        case SPI_RANGE:
                *index = d->hwirq;
                return offset;
        case EPPI_RANGE:
                /*
                 * Contrary to the ESPI range, the EPPI range is contiguous
                 * to the PPI range in the registers, so let's adjust the
                 * displacement accordingly. Consistency is overrated.
                 */
                *index = d->hwirq - EPPI_BASE_INTID + 32;
                return offset;
        case ESPI_RANGE:
                *index = d->hwirq - ESPI_BASE_INTID;
                switch (offset) {
                case GICD_ISENABLER:
                        return GICD_ISENABLERnE;
                case GICD_ICENABLER:
                        return GICD_ICENABLERnE;
                case GICD_ISPENDR:
                        return GICD_ISPENDRnE;
                case GICD_ICPENDR:
                        return GICD_ICPENDRnE;
                case GICD_ISACTIVER:
                        return GICD_ISACTIVERnE;
                case GICD_ICACTIVER:
                        return GICD_ICACTIVERnE;
                case GICD_IPRIORITYR:
                        return GICD_IPRIORITYRnE;
                case GICD_ICFGR:
                        return GICD_ICFGRnE;
                case GICD_IROUTER:
                        return GICD_IROUTERnE;
                default:
                        break;
                }
                break;
        default:
                break;
        }

        WARN_ON(1);
        *index = d->hwirq;
        return offset;
}

static int gic_peek_irq(struct irq_data *d, u32 offset)
{
        void __iomem *base;
        u32 index, mask;

        offset = convert_offset_index(d, offset, &index);
        mask = 1 << (index % 32);

        if (gic_irq_in_rdist(d))
                base = gic_data_rdist_sgi_base();
        else
                base = gic_data.dist_base;

        return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask);
}

static void gic_poke_irq(struct irq_data *d, u32 offset)
{
        void (*rwp_wait)(void);
        void __iomem *base;
        u32 index, mask;

        offset = convert_offset_index(d, offset, &index);
        mask = 1 << (index % 32);

        if (gic_irq_in_rdist(d)) {
                base = gic_data_rdist_sgi_base();
                rwp_wait = gic_redist_wait_for_rwp;
        } else {
                base = gic_data.dist_base;
                rwp_wait = gic_dist_wait_for_rwp;
        }

        writel_relaxed(mask, base + offset + (index / 32) * 4);
        rwp_wait();
}

static void gic_mask_irq(struct irq_data *d)
{
        gic_poke_irq(d, GICD_ICENABLER);
}

static void gic_eoimode1_mask_irq(struct irq_data *d)
{
        gic_mask_irq(d);
        /*
         * When masking a forwarded interrupt, make sure it is
         * deactivated as well.
         *
         * This ensures that an interrupt that is getting
         * disabled/masked will not get "stuck", because there is
         * no one left to deactivate it (the guest may have been
         * terminated).
         */
        if (irqd_is_forwarded_to_vcpu(d))
                gic_poke_irq(d, GICD_ICACTIVER);
}

static void gic_unmask_irq(struct irq_data *d)
{
        gic_poke_irq(d, GICD_ISENABLER);
}

static inline bool gic_supports_nmi(void)
{
        return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
               static_branch_likely(&supports_pseudo_nmis);
}

static int gic_irq_set_irqchip_state(struct irq_data *d,
                                     enum irqchip_irq_state which, bool val)
{
        u32 reg;

        if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
                return -EINVAL;

        switch (which) {
        case IRQCHIP_STATE_PENDING:
                reg = val ? GICD_ISPENDR : GICD_ICPENDR;
                break;

        case IRQCHIP_STATE_ACTIVE:
                reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
                break;

        case IRQCHIP_STATE_MASKED:
                reg = val ? GICD_ICENABLER : GICD_ISENABLER;
                break;

        default:
                return -EINVAL;
        }

        gic_poke_irq(d, reg);
        return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d,
                                     enum irqchip_irq_state which, bool *val)
{
        if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
                return -EINVAL;

        switch (which) {
        case IRQCHIP_STATE_PENDING:
                *val = gic_peek_irq(d, GICD_ISPENDR);
                break;

        case IRQCHIP_STATE_ACTIVE:
                *val = gic_peek_irq(d, GICD_ISACTIVER);
                break;

        case IRQCHIP_STATE_MASKED:
                *val = !gic_peek_irq(d, GICD_ISENABLER);
                break;

        default:
                return -EINVAL;
        }

        return 0;
}

static void gic_irq_set_prio(struct irq_data *d, u8 prio)
{
        void __iomem *base = gic_dist_base(d);
        u32 offset, index;

        offset = convert_offset_index(d, GICD_IPRIORITYR, &index);

        writeb_relaxed(prio, base + offset + index);
}

static u32 __gic_get_ppi_index(irq_hw_number_t hwirq)
{
        switch (__get_intid_range(hwirq)) {
        case PPI_RANGE:
                return hwirq - 16;
        case EPPI_RANGE:
                return hwirq - EPPI_BASE_INTID + 16;
        default:
                unreachable();
        }
}

static u32 gic_get_ppi_index(struct irq_data *d)
{
        return __gic_get_ppi_index(d->hwirq);
}

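/*
 * Promoting an interrupt to an NMI only changes its priority to
 * GICD_INT_NMI_PRI and swaps in the NMI flow handler; for PPIs a
 * refcount tracks the per-CPU requesters so the handler is only
 * switched on the first setup and restored on the last teardown.
 */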
static int gic_irq_nmi_setup(struct irq_data *d)
{
        struct irq_desc *desc = irq_to_desc(d->irq);

        if (!gic_supports_nmi())
                return -EINVAL;

        if (gic_peek_irq(d, GICD_ISENABLER)) {
                pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
                return -EINVAL;
        }

        /*
         * A secondary irq_chip should be in charge of LPI request,
         * it should not be possible to get there.
         */
        if (WARN_ON(gic_irq(d) >= 8192))
                return -EINVAL;

        /* desc lock should already be held */
        if (gic_irq_in_rdist(d)) {
                u32 idx = gic_get_ppi_index(d);

                /* Setting up a PPI as NMI, only switch handler for first NMI */
                if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) {
                        refcount_set(&ppi_nmi_refs[idx], 1);
                        desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
                }
        } else {
                desc->handle_irq = handle_fasteoi_nmi;
        }

        gic_irq_set_prio(d, GICD_INT_NMI_PRI);

        return 0;
}

static void gic_irq_nmi_teardown(struct irq_data *d)
{
        struct irq_desc *desc = irq_to_desc(d->irq);

        if (WARN_ON(!gic_supports_nmi()))
                return;

        if (gic_peek_irq(d, GICD_ISENABLER)) {
                pr_err("Cannot clear NMI property of enabled IRQ %u\n", d->irq);
                return;
        }

        /*
         * A secondary irq_chip should be in charge of LPI request,
         * it should not be possible to get there.
         */
        if (WARN_ON(gic_irq(d) >= 8192))
                return;

        /* desc lock should already be held */
        if (gic_irq_in_rdist(d)) {
                u32 idx = gic_get_ppi_index(d);

                /* Tearing down an NMI, only switch handler for last NMI */
                if (refcount_dec_and_test(&ppi_nmi_refs[idx]))
                        desc->handle_irq = handle_percpu_devid_irq;
        } else {
                desc->handle_irq = handle_fasteoi_irq;
        }

        gic_irq_set_prio(d, GICD_INT_DEF_PRI);
}

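/*
 * In EOImode 0, a single write to ICC_EOIR1_EL1 both drops the running
 * priority and deactivates the interrupt. In EOImode 1 (the split
 * EOI/Deactivate mode used when booted at EL2), the EOI only drops the
 * priority and the deactivation is a separate ICC_DIR_EL1 write.
 */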
static void gic_eoi_irq(struct irq_data *d)
{
        gic_write_eoir(gic_irq(d));
}

static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
        /*
         * No need to deactivate an LPI, or an interrupt that
         * is being forwarded to a vcpu.
         */
        if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
                return;
        gic_write_dir(gic_irq(d));
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
        enum gic_intid_range range;
        unsigned int irq = gic_irq(d);
        void (*rwp_wait)(void);
        void __iomem *base;
        u32 offset, index;
        int ret;

        range = get_intid_range(d);

        /* Interrupt configuration for SGIs can't be changed */
        if (range == SGI_RANGE)
                return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;

        /* SPIs have restrictions on the supported types */
        if ((range == SPI_RANGE || range == ESPI_RANGE) &&
            type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
                return -EINVAL;

        if (gic_irq_in_rdist(d)) {
                base = gic_data_rdist_sgi_base();
                rwp_wait = gic_redist_wait_for_rwp;
        } else {
                base = gic_data.dist_base;
                rwp_wait = gic_dist_wait_for_rwp;
        }

        offset = convert_offset_index(d, GICD_ICFGR, &index);

        ret = gic_configure_irq(index, type, base + offset, rwp_wait);
        if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
                /* Misconfigured PPIs are usually not fatal */
                pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq);
                ret = 0;
        }

        return ret;
}

static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
        if (get_intid_range(d) == SGI_RANGE)
                return -EINVAL;

        if (vcpu)
                irqd_set_forwarded_to_vcpu(d);
        else
                irqd_clr_forwarded_to_vcpu(d);
        return 0;
}

static u64 gic_mpidr_to_affinity(unsigned long mpidr)
{
        u64 aff;

        aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
               MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
               MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
               MPIDR_AFFINITY_LEVEL(mpidr, 0));

        return aff;
}

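/*
 * An interrupt that turned out to be spurious or unhandled must still
 * be retired from the CPU interface: with split EOI/Deactivate the
 * priority drop has already happened, so only a deactivate (DIR) is
 * needed for non-LPIs; otherwise a plain EOI does both.
 */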
static void gic_deactivate_unhandled(u32 irqnr)
{
        if (static_branch_likely(&supports_deactivate_key)) {
                if (irqnr < 8192)
                        gic_write_dir(irqnr);
        } else {
                gic_write_eoir(irqnr);
        }
}

static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
{
        bool irqs_enabled = interrupts_enabled(regs);
        int err;

        if (irqs_enabled)
                nmi_enter();

        if (static_branch_likely(&supports_deactivate_key))
                gic_write_eoir(irqnr);
        /*
         * Leave the PSR.I bit set to prevent other NMIs from being
         * received while handling this one.
         * PSR.I will be restored when we ERET to the
         * interrupted context.
         */
        err = generic_handle_domain_nmi(gic_data.domain, irqnr);
        if (err)
                gic_deactivate_unhandled(irqnr);

        if (irqs_enabled)
                nmi_exit();
}

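/*
 * When an NMI lands with IRQs disabled, the entry code has already set
 * PMR to a value that would let any interrupt be acknowledged, not just
 * NMIs. To avoid acknowledging a regular IRQ in NMI context, drop PMR
 * back to the masking level around the IAR read so that only NMIs can
 * be taken, then restore it.
 */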
static u32 do_read_iar(struct pt_regs *regs)
{
        u32 iar;

        if (gic_supports_nmi() && unlikely(!interrupts_enabled(regs))) {
                u64 pmr;

                pmr = gic_read_pmr();
                gic_pmr_mask_irqs();
                isb();

                iar = gic_read_iar();

                gic_write_pmr(pmr);
        } else {
                iar = gic_read_iar();
        }

        return iar;
}

static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
        u32 irqnr;

        irqnr = do_read_iar(regs);

        /* Check for special IDs first */
        if (irqnr >= 1020 && irqnr <= 1023)
                return;

        if (gic_supports_nmi() &&
            unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI))) {
                gic_handle_nmi(irqnr, regs);
                return;
        }

        if (gic_prio_masking_enabled()) {
                gic_pmr_mask_irqs();
                gic_arch_enable_irqs();
        }

        if (static_branch_likely(&supports_deactivate_key))
                gic_write_eoir(irqnr);
        else
                isb();

        if (generic_handle_domain_irq(gic_data.domain, irqnr)) {
                WARN_ONCE(true, "Unexpected interrupt received!\n");
                gic_deactivate_unhandled(irqnr);
        }
}

static u32 gic_get_pribits(void)
{
        u32 pribits;

        pribits = gic_read_ctlr();
        pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
        pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
        pribits++;

        return pribits;
}

static bool gic_has_group0(void)
{
        u32 val;
        u32 old_pmr;

        old_pmr = gic_read_pmr();

        /*
         * Let's find out if Group0 is under control of EL3 or not by
         * setting the highest possible, non-zero priority in PMR.
         *
         * If SCR_EL3.FIQ is set, the priority gets shifted down in
         * order for the CPU interface to set bit 7, keeping the
         * actual priority in the non-secure range. In the process, it
         * loses the least significant bit and the effective priority
         * becomes 0x80. Reading it back then returns 0, indicating
         * that we don't have access to Group0.
         */
        gic_write_pmr(BIT(8 - gic_get_pribits()));
        val = gic_read_pmr();

        gic_write_pmr(old_pmr);

        return val != 0;
}

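/*
 * One-time distributor setup: disable it, configure all (E)SPIs as
 * level-triggered, default-priority, Group-1, re-enable it with
 * affinity routing (ARE), and point every global interrupt at the
 * boot CPU.
 */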
static void __init gic_dist_init(void)
{
        unsigned int i;
        u64 affinity;
        void __iomem *base = gic_data.dist_base;
        u32 val;

        /* Disable the distributor */
        writel_relaxed(0, base + GICD_CTLR);
        gic_dist_wait_for_rwp();

        /*
         * Configure SPIs as non-secure Group-1. This will only matter
         * if the GIC only has a single security state. This will not
         * do the right thing if the kernel is running in secure mode,
         * but that's not the intended use case anyway.
         */
        for (i = 32; i < GIC_LINE_NR; i += 32)
                writel_relaxed(~0, base + GICD_IGROUPR + i / 8);

        /* Extended SPI range, not handled by the GICv2/GICv3 common code */
        for (i = 0; i < GIC_ESPI_NR; i += 32) {
                writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
                writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
        }

        for (i = 0; i < GIC_ESPI_NR; i += 32)
                writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);

        for (i = 0; i < GIC_ESPI_NR; i += 16)
                writel_relaxed(0, base + GICD_ICFGRnE + i / 4);

        for (i = 0; i < GIC_ESPI_NR; i += 4)
                writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i);

        /* Now do the common stuff, and wait for the distributor to drain */
        gic_dist_config(base, GIC_LINE_NR, gic_dist_wait_for_rwp);

        val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
        if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
                pr_info("Enabling SGIs without active state\n");
                val |= GICD_CTLR_nASSGIreq;
        }

        /* Enable distributor with ARE, Group1 */
        writel_relaxed(val, base + GICD_CTLR);

        /*
         * Set all global interrupts to the boot CPU only. ARE must be
         * enabled.
         */
        affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
        for (i = 32; i < GIC_LINE_NR; i++)
                gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);

        for (i = 0; i < GIC_ESPI_NR; i++)
                gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
}

static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
{
        int ret = -ENODEV;
        int i;

        for (i = 0; i < gic_data.nr_redist_regions; i++) {
                void __iomem *ptr = gic_data.redist_regions[i].redist_base;
                u64 typer;
                u32 reg;

                reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
                if (reg != GIC_PIDR2_ARCH_GICv3 &&
                    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
                        pr_warn("No redistributor present @%p\n", ptr);
                        break;
                }

                do {
                        typer = gic_read_typer(ptr + GICR_TYPER);
                        ret = fn(gic_data.redist_regions + i, ptr);
                        if (!ret)
                                return 0;

                        if (gic_data.redist_regions[i].single_redist)
                                break;

                        if (gic_data.redist_stride) {
                                ptr += gic_data.redist_stride;
                        } else {
                                ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
                                if (typer & GICR_TYPER_VLPIS)
                                        ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
                        }
                } while (!(typer & GICR_TYPER_LAST));
        }

        return ret ? -ENODEV : 0;
}

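/*
 * Match this CPU's MPIDR against GICR_TYPER.Affinity to claim its
 * private redistributor frame while iterating over the GICR regions.
 */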
static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
{
        unsigned long mpidr = cpu_logical_map(smp_processor_id());
        u64 typer;
        u32 aff;

        /*
         * Convert affinity to a 32bit value that can be matched to
         * GICR_TYPER bits [63:32].
         */
        aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
               MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
               MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
               MPIDR_AFFINITY_LEVEL(mpidr, 0));

        typer = gic_read_typer(ptr + GICR_TYPER);
        if ((typer >> 32) == aff) {
                u64 offset = ptr - region->redist_base;

                raw_spin_lock_init(&gic_data_rdist()->rd_lock);
                gic_data_rdist_rd_base() = ptr;
                gic_data_rdist()->phys_base = region->phys_base + offset;

                pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
                        smp_processor_id(), mpidr,
                        (int)(region - gic_data.redist_regions),
                        &gic_data_rdist()->phys_base);
                return 0;
        }

        /* Try next one */
        return 1;
}

static int gic_populate_rdist(void)
{
        if (gic_iterate_rdists(__gic_populate_rdist) == 0)
                return 0;

        /* We couldn't even deal with ourselves... */
        WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
             smp_processor_id(),
             (unsigned long)cpu_logical_map(smp_processor_id()));
        return -ENODEV;
}

static int __gic_update_rdist_properties(struct redist_region *region,
                                         void __iomem *ptr)
{
        u64 typer = gic_read_typer(ptr + GICR_TYPER);

        gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);

        /* RVPEID implies some form of DirectLPI, no matter what the doc says... :-( */
        gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
        gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
                                           gic_data.rdists.has_rvpeid);
        gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);

        /* Detect non-sensical configurations */
        if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
                gic_data.rdists.has_direct_lpi = false;
                gic_data.rdists.has_vlpis = false;
                gic_data.rdists.has_rvpeid = false;
        }

        gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);

        return 1;
}

static void gic_update_rdist_properties(void)
{
        gic_data.ppi_nr = UINT_MAX;
        gic_iterate_rdists(__gic_update_rdist_properties);
        if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
                gic_data.ppi_nr = 0;
        pr_info("%d PPIs implemented\n", gic_data.ppi_nr);
        if (gic_data.rdists.has_vlpis)
                pr_info("GICv4 features: %s%s%s\n",
                        gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
                        gic_data.rdists.has_rvpeid ? "RVPEID " : "",
                        gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
}

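/*
 * Check whether the distributor implements a single security state
 * (GICD_CTLR.DS == 1), in which case Linux fully owns it and may
 * save/restore its state over power management transitions.
 */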
static inline bool gic_dist_security_disabled(void)
{
        return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
}

static void gic_cpu_sys_reg_init(void)
{
        int i, cpu = smp_processor_id();
        u64 mpidr = cpu_logical_map(cpu);
        u64 need_rss = MPIDR_RS(mpidr);
        bool group0;
        u32 pribits;

        /*
         * Need to check that the SRE bit has actually been set. If
         * not, it means that SRE is disabled at EL2. We're going to
         * die painfully, and there is nothing we can do about it.
         */
        if (!gic_enable_sre())
                pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");

        pribits = gic_get_pribits();

        group0 = gic_has_group0();

        /* Set priority mask register */
        if (!gic_prio_masking_enabled()) {
                write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
        } else if (gic_supports_nmi()) {
                /*
                 * Check that all CPUs use the same priority space. On a
                 * mismatch with the boot CPU, the system is likely to die
                 * as interrupt masking will not work properly on all CPUs.
                 *
                 * The boot CPU calls this function before enabling NMI
                 * support, and as a result we'll never see this warning
                 * in the boot path for that CPU.
                 */
                if (static_branch_unlikely(&gic_nonsecure_priorities))
                        WARN_ON(!group0 || gic_dist_security_disabled());
                else
                        WARN_ON(group0 && !gic_dist_security_disabled());
        }

        /*
         * Some firmwares hand over to the kernel with the BPR changed from
         * its reset value (and with a value large enough to prevent
         * any pre-emptive interrupts from working at all). Writing a zero
         * to BPR restores its reset value.
         */
        gic_write_bpr1(0);

        if (static_branch_likely(&supports_deactivate_key)) {
                /* EOI drops priority only (mode 1) */
                gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
        } else {
                /* EOI deactivates interrupt too (mode 0) */
                gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
        }

        /* Always whack Group0 before Group1 */
        if (group0) {
                switch (pribits) {
                case 8:
                case 7:
                        write_gicreg(0, ICC_AP0R3_EL1);
                        write_gicreg(0, ICC_AP0R2_EL1);
                        fallthrough;
                case 6:
                        write_gicreg(0, ICC_AP0R1_EL1);
                        fallthrough;
                case 5:
                case 4:
                        write_gicreg(0, ICC_AP0R0_EL1);
                }

                isb();
        }

        switch (pribits) {
        case 8:
        case 7:
                write_gicreg(0, ICC_AP1R3_EL1);
                write_gicreg(0, ICC_AP1R2_EL1);
                fallthrough;
        case 6:
                write_gicreg(0, ICC_AP1R1_EL1);
                fallthrough;
        case 5:
        case 4:
                write_gicreg(0, ICC_AP1R0_EL1);
        }

        isb();

        /* ... and let's hit the road... */
        gic_write_grpen1(1);

        /* Keep the RSS capability status in per_cpu variable */
        per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);

        /* Check that all CPUs are capable of sending SGIs to each other */
        for_each_online_cpu(i) {
                bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);

                need_rss |= MPIDR_RS(cpu_logical_map(i));
                if (need_rss && (!have_rss))
                        pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
                                cpu, (unsigned long)mpidr,
                                i, (unsigned long)cpu_logical_map(i));
        }

        /*
         * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
         * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
         * UNPREDICTABLE choice of:
         *   - The write is ignored.
         *   - The RS field is treated as 0.
         */
        if (need_rss && (!gic_data.has_rss))
                pr_crit_once("RSS is required but GICD doesn't support it\n");
}

static bool gicv3_nolpi;

static int __init gicv3_nolpi_cfg(char *buf)
{
        return strtobool(buf, &gicv3_nolpi);
}
early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);

static int gic_dist_supports_lpis(void)
{
        return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
                !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
                !gicv3_nolpi);
}

static void gic_cpu_init(void)
{
        void __iomem *rbase;
        int i;

        /* Register ourselves with the rest of the world */
        if (gic_populate_rdist())
                return;

        gic_enable_redist(true);

        WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) &&
             !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange),
             "Distributor has extended ranges, but CPU%d doesn't\n",
             smp_processor_id());

        rbase = gic_data_rdist_sgi_base();

        /* Configure SGIs/PPIs as non-secure Group-1 */
        for (i = 0; i < gic_data.ppi_nr + 16; i += 32)
                writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);

        gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp);

        /* initialise system registers */
        gic_cpu_sys_reg_init();
}

#ifdef CONFIG_SMP

#define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
#define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL)

static int gic_starting_cpu(unsigned int cpu)
{
        gic_cpu_init();

        if (gic_dist_supports_lpis())
                its_cpu_init();

        return 0;
}

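/*
 * ICC_SGI1R_EL1 can only target the 16 CPUs that share affinity levels
 * 3..1 (one "cluster"): build the 16-bit target list covering the CPUs
 * of @mask that live in the cluster @cluster_id, advancing *base_cpu to
 * the last CPU included.
 */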
static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
                                   unsigned long cluster_id)
{
        int next_cpu, cpu = *base_cpu;
        unsigned long mpidr = cpu_logical_map(cpu);
        u16 tlist = 0;

        while (cpu < nr_cpu_ids) {
                tlist |= 1 << (mpidr & 0xf);

                next_cpu = cpumask_next(cpu, mask);
                if (next_cpu >= nr_cpu_ids)
                        goto out;
                cpu = next_cpu;

                mpidr = cpu_logical_map(cpu);

                if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
                        cpu--;
                        goto out;
                }
        }
out:
        *base_cpu = cpu;
        return tlist;
}

#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
        (MPIDR_AFFINITY_LEVEL(cluster_id, level) \
                << ICC_SGI1R_AFFINITY_## level ##_SHIFT)

static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
        u64 val;

        val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) |
               MPIDR_TO_SGI_AFFINITY(cluster_id, 2) |
               irq << ICC_SGI1R_SGI_ID_SHIFT |
               MPIDR_TO_SGI_AFFINITY(cluster_id, 1) |
               MPIDR_TO_SGI_RS(cluster_id) |
               tlist << ICC_SGI1R_TARGET_LIST_SHIFT);

        pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
        gic_write_sgi1r(val);
}

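/*
 * Sending an IPI walks the destination mask one cluster at a time,
 * issuing one ICC_SGI1R_EL1 write per cluster. The wmb() orders prior
 * normal-memory stores before the SGI, and the trailing isb() forces
 * the system register writes out before the sender proceeds.
 */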
static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
        int cpu;

        if (WARN_ON(d->hwirq >= 16))
                return;

        /*
         * Ensure that stores to Normal memory are visible to the
         * other CPUs before issuing the IPI.
         */
        wmb();

        for_each_cpu(cpu, mask) {
                u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
                u16 tlist;

                tlist = gic_compute_target_list(&cpu, mask, cluster_id);
                gic_send_sgi(cluster_id, tlist, d->hwirq);
        }

        /* Force the above writes to ICC_SGI1R_EL1 to be executed */
        isb();
}

static void __init gic_smp_init(void)
{
        struct irq_fwspec sgi_fwspec = {
                .fwnode         = gic_data.fwnode,
                .param_count    = 1,
        };
        int base_sgi;

        cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
                                  "irqchip/arm/gicv3:starting",
                                  gic_starting_cpu, NULL);

        /* Register all 8 non-secure SGIs */
        base_sgi = __irq_domain_alloc_irqs(gic_data.domain, -1, 8,
                                           NUMA_NO_NODE, &sgi_fwspec,
                                           false, NULL);
        if (WARN_ON(base_sgi <= 0))
                return;

        set_smp_ipi_range(base_sgi, 8);
}

static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
{
        unsigned int cpu;
        u32 offset, index;
        void __iomem *reg;
        int enabled;
        u64 val;

        if (force)
                cpu = cpumask_first(mask_val);
        else
                cpu = cpumask_any_and(mask_val, cpu_online_mask);

        if (cpu >= nr_cpu_ids)
                return -EINVAL;

        if (gic_irq_in_rdist(d))
                return -EINVAL;

        /* If interrupt was enabled, disable it first */
        enabled = gic_peek_irq(d, GICD_ISENABLER);
        if (enabled)
                gic_mask_irq(d);

        offset = convert_offset_index(d, GICD_IROUTER, &index);
        reg = gic_dist_base(d) + offset + (index * 8);
        val = gic_mpidr_to_affinity(cpu_logical_map(cpu));

        gic_write_irouter(val, reg);

        /*
         * If the interrupt was enabled, enable it again. Otherwise,
         * just wait for the distributor to have digested our changes.
         */
        if (enabled)
                gic_unmask_irq(d);
        else
                gic_dist_wait_for_rwp();

        irq_data_update_effective_affinity(d, cpumask_of(cpu));

        return IRQ_SET_MASK_OK_DONE;
}
#else
#define gic_set_affinity        NULL
#define gic_ipi_send_mask       NULL
#define gic_smp_init()          do { } while(0)
#endif

static int gic_retrigger(struct irq_data *data)
{
        return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
}

#ifdef CONFIG_CPU_PM
static int gic_cpu_pm_notifier(struct notifier_block *self,
                               unsigned long cmd, void *v)
{
        if (cmd == CPU_PM_EXIT) {
                if (gic_dist_security_disabled())
                        gic_enable_redist(true);
                gic_cpu_sys_reg_init();
        } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
                gic_write_grpen1(0);
                gic_enable_redist(false);
        }
        return NOTIFY_OK;
}

static struct notifier_block gic_cpu_pm_notifier_block = {
        .notifier_call = gic_cpu_pm_notifier,
};

static void gic_cpu_pm_init(void)
{
        cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
}

#else
static inline void gic_cpu_pm_init(void) { }
#endif

static struct irq_chip gic_chip = {
        .name                   = "GICv3",
        .irq_mask               = gic_mask_irq,
        .irq_unmask             = gic_unmask_irq,
        .irq_eoi                = gic_eoi_irq,
        .irq_set_type           = gic_set_type,
        .irq_set_affinity       = gic_set_affinity,
        .irq_retrigger          = gic_retrigger,
        .irq_get_irqchip_state  = gic_irq_get_irqchip_state,
        .irq_set_irqchip_state  = gic_irq_set_irqchip_state,
        .irq_nmi_setup          = gic_irq_nmi_setup,
        .irq_nmi_teardown       = gic_irq_nmi_teardown,
        .ipi_send_mask          = gic_ipi_send_mask,
        .flags                  = IRQCHIP_SET_TYPE_MASKED |
                                  IRQCHIP_SKIP_SET_WAKE |
                                  IRQCHIP_MASK_ON_SUSPEND,
};

static struct irq_chip gic_eoimode1_chip = {
        .name                   = "GICv3",
        .irq_mask               = gic_eoimode1_mask_irq,
        .irq_unmask             = gic_unmask_irq,
        .irq_eoi                = gic_eoimode1_eoi_irq,
        .irq_set_type           = gic_set_type,
        .irq_set_affinity       = gic_set_affinity,
        .irq_retrigger          = gic_retrigger,
        .irq_get_irqchip_state  = gic_irq_get_irqchip_state,
        .irq_set_irqchip_state  = gic_irq_set_irqchip_state,
        .irq_set_vcpu_affinity  = gic_irq_set_vcpu_affinity,
        .irq_nmi_setup          = gic_irq_nmi_setup,
        .irq_nmi_teardown       = gic_irq_nmi_teardown,
        .ipi_send_mask          = gic_ipi_send_mask,
        .flags                  = IRQCHIP_SET_TYPE_MASKED |
                                  IRQCHIP_SKIP_SET_WAKE |
                                  IRQCHIP_MASK_ON_SUSPEND,
};

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hw)
{
        struct irq_chip *chip = &gic_chip;
        struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));

        if (static_branch_likely(&supports_deactivate_key))
                chip = &gic_eoimode1_chip;

        switch (__get_intid_range(hw)) {
        case SGI_RANGE:
        case PPI_RANGE:
        case EPPI_RANGE:
                irq_set_percpu_devid(irq);
                irq_domain_set_info(d, irq, hw, chip, d->host_data,
                                    handle_percpu_devid_irq, NULL, NULL);
                break;

        case SPI_RANGE:
        case ESPI_RANGE:
                irq_domain_set_info(d, irq, hw, chip, d->host_data,
                                    handle_fasteoi_irq, NULL, NULL);
                irq_set_probe(irq);
                irqd_set_single_target(irqd);
                break;

        case LPI_RANGE:
                if (!gic_dist_supports_lpis())
                        return -EPERM;
                irq_domain_set_info(d, irq, hw, chip, d->host_data,
                                    handle_fasteoi_irq, NULL, NULL);
                break;

        default:
                return -EPERM;
        }

        /* Prevents SW retriggers which mess up the core code */
        irqd_set_handle_enforce_irqctx(irqd);
        return 0;
}

static int gic_irq_domain_translate(struct irq_domain *d,
                                    struct irq_fwspec *fwspec,
                                    unsigned long *hwirq,
                                    unsigned int *type)
{
        if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
                *hwirq = fwspec->param[0];
                *type = IRQ_TYPE_EDGE_RISING;
                return 0;
        }

        if (is_of_node(fwspec->fwnode)) {
                if (fwspec->param_count < 3)
                        return -EINVAL;

                switch (fwspec->param[0]) {
                case 0:                 /* SPI */
                        *hwirq = fwspec->param[1] + 32;
                        break;
                case 1:                 /* PPI */
                        *hwirq = fwspec->param[1] + 16;
                        break;
                case 2:                 /* ESPI */
                        *hwirq = fwspec->param[1] + ESPI_BASE_INTID;
                        break;
                case 3:                 /* EPPI */
                        *hwirq = fwspec->param[1] + EPPI_BASE_INTID;
                        break;
                case GIC_IRQ_TYPE_LPI:  /* LPI */
                        *hwirq = fwspec->param[1];
                        break;
                case GIC_IRQ_TYPE_PARTITION:
                        *hwirq = fwspec->param[1];
                        if (fwspec->param[1] >= 16)
                                *hwirq += EPPI_BASE_INTID - 16;
                        else
                                *hwirq += 16;
                        break;
                default:
                        return -EINVAL;
                }

                *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

                /*
                 * Make it clear that broken DTs are... broken.
                 * Partitioned PPIs are an unfortunate exception.
                 */
                WARN_ON(*type == IRQ_TYPE_NONE &&
                        fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
                return 0;
        }

        if (is_fwnode_irqchip(fwspec->fwnode)) {
                if (fwspec->param_count != 2)
                        return -EINVAL;

                *hwirq = fwspec->param[0];
                *type = fwspec->param[1];

                WARN_ON(*type == IRQ_TYPE_NONE);
                return 0;
        }

        return -EINVAL;
}

static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                unsigned int nr_irqs, void *arg)
{
        int i, ret;
        irq_hw_number_t hwirq;
        unsigned int type = IRQ_TYPE_NONE;
        struct irq_fwspec *fwspec = arg;

        ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
        if (ret)
                return ret;

        for (i = 0; i < nr_irqs; i++) {
                ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
                if (ret)
                        return ret;
        }

        return 0;
}

static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
                                unsigned int nr_irqs)
{
        int i;

        for (i = 0; i < nr_irqs; i++) {
                struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

                irq_set_handler(virq + i, NULL);
                irq_domain_reset_irq_data(d);
        }
}

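/*
 * A PPI fwspec refers to a partitioned PPI when it comes from DT, names
 * a (E)PPI, and carries a non-null 4th cell (the partition phandle).
 */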
static bool fwspec_is_partitioned_ppi(struct irq_fwspec *fwspec,
                                      irq_hw_number_t hwirq)
{
        enum gic_intid_range range;

        if (!gic_data.ppi_descs)
                return false;

        if (!is_of_node(fwspec->fwnode))
                return false;

        if (fwspec->param_count < 4 || !fwspec->param[3])
                return false;

        range = __get_intid_range(hwirq);
        if (range != PPI_RANGE && range != EPPI_RANGE)
                return false;

        return true;
}

static int gic_irq_domain_select(struct irq_domain *d,
                                 struct irq_fwspec *fwspec,
                                 enum irq_domain_bus_token bus_token)
{
        unsigned int type, ret, ppi_idx;
        irq_hw_number_t hwirq;

        /* Not for us */
        if (fwspec->fwnode != d->fwnode)
                return 0;

        /* If this is not DT, then we have a single domain */
        if (!is_of_node(fwspec->fwnode))
                return 1;

        ret = gic_irq_domain_translate(d, fwspec, &hwirq, &type);
        if (WARN_ON_ONCE(ret))
                return 0;

        if (!fwspec_is_partitioned_ppi(fwspec, hwirq))
                return d == gic_data.domain;

        /*
         * If this is a PPI and we have a 4th (non-null) parameter,
         * then we need to match the partition domain.
         */
        ppi_idx = __gic_get_ppi_index(hwirq);
        return d == partition_get_domain(gic_data.ppi_descs[ppi_idx]);
}

static const struct irq_domain_ops gic_irq_domain_ops = {
        .translate = gic_irq_domain_translate,
        .alloc = gic_irq_domain_alloc,
        .free = gic_irq_domain_free,
        .select = gic_irq_domain_select,
};

static int partition_domain_translate(struct irq_domain *d,
                                      struct irq_fwspec *fwspec,
                                      unsigned long *hwirq,
                                      unsigned int *type)
{
        unsigned long ppi_intid;
        struct device_node *np;
        unsigned int ppi_idx;
        int ret;

        if (!gic_data.ppi_descs)
                return -ENOMEM;

        np = of_find_node_by_phandle(fwspec->param[3]);
        if (WARN_ON(!np))
                return -EINVAL;

        ret = gic_irq_domain_translate(d, fwspec, &ppi_intid, type);
        if (WARN_ON_ONCE(ret))
                return 0;

        ppi_idx = __gic_get_ppi_index(ppi_intid);
        ret = partition_translate_id(gic_data.ppi_descs[ppi_idx],
                                     of_node_to_fwnode(np));
        if (ret < 0)
                return ret;

        *hwirq = ret;
        *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

        return 0;
}

static const struct irq_domain_ops partition_domain_ops = {
        .translate = partition_domain_translate,
        .select = gic_irq_domain_select,
};

static bool gic_enable_quirk_msm8996(void *data)
{
        struct gic_chip_data *d = data;

        d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996;

        return true;
}

static bool gic_enable_quirk_cavium_38539(void *data)
{
        struct gic_chip_data *d = data;

        d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539;

        return true;
}

static bool gic_enable_quirk_hip06_07(void *data)
{
        struct gic_chip_data *d = data;

        /*
         * HIP06 GICD_IIDR clashes with GIC-600 product number (despite
         * not being an ARM implementation), and HIP07 doesn't even have
         * a proper IIDR. Both wrongly advertise Extended SPI support in
         * GICD_TYPER, which GIC-600 does not implement, so put them
         * right when ESPI is claimed.
         */
        if (d->rdists.gicd_typer & GICD_TYPER_ESPI) {
                /* Zero both ESPI and the RES0 field next to it... */
                d->rdists.gicd_typer &= ~GENMASK(9, 8);
                return true;
        }

        return false;
}

static const struct gic_quirk gic_quirks[] = {
        {
                .desc   = "GICv3: Qualcomm MSM8996 broken firmware",
                .compatible = "qcom,msm8996-gic-v3",
                .init   = gic_enable_quirk_msm8996,
        },
        {
                .desc   = "GICv3: HIP06 erratum 161010803",
                .iidr   = 0x0204043b,
                .mask   = 0xffffffff,
                .init   = gic_enable_quirk_hip06_07,
        },
        {
                .desc   = "GICv3: HIP07 erratum 161010803",
                .iidr   = 0x00000000,
                .mask   = 0xffffffff,
                .init   = gic_enable_quirk_hip06_07,
        },
        {
                /*
                 * Reserved register accesses generate a Synchronous
                 * External Abort. This erratum applies to:
                 * - ThunderX: CN88xx
                 * - OCTEON TX: CN83xx, CN81xx
                 * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx*
                 */
                .desc   = "GICv3: Cavium erratum 38539",
                .iidr   = 0xa000034c,
                .mask   = 0xe8f00fff,
                .init   = gic_enable_quirk_cavium_38539,
        },
        {
        }
};

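/*
 * Pseudo-NMI support piggybacks on priority masking: NMI-class
 * interrupts get a priority (GICD_INT_NMI_PRI) above everything masked
 * by PMR. This also decides whether a PMR write needs an explicit
 * barrier (ICC_CTLR_EL1.PMHE) and whether the non-secure priority view
 * applies (Group 0 usable while GIC security is enabled).
 */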
static void gic_enable_nmi_support(void)
{
        int i;

        if (!gic_prio_masking_enabled())
                return;

        ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL);
        if (!ppi_nmi_refs)
                return;

        for (i = 0; i < gic_data.ppi_nr; i++)
                refcount_set(&ppi_nmi_refs[i], 0);

        /*
         * Linux itself doesn't use 1:N distribution, so it has no need to
         * set PMHE. The only reason to have it set is if EL3 requires it
         * (and we can't change it).
         */
        if (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK)
                static_branch_enable(&gic_pmr_sync);

        pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
                static_branch_unlikely(&gic_pmr_sync) ? "forced" : "relaxed");

        /*
         * How priority values are used by the GIC depends on two things:
         * the security state of the GIC (controlled by the GICD_CTLR.DS bit)
         * and whether Group 0 interrupts can be delivered to Linux in the
         * non-secure world as FIQs (controlled by the SCR_EL3.FIQ bit).
         * These affect the ICC_PMR_EL1 register and the priority that
         * software assigns to interrupts:
         *
         * GICD_CTLR.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority
         * -----------------------------------------------------------
         *      1       |      -      |  unchanged  |    unchanged
         * -----------------------------------------------------------
         *      0       |      1      |  non-secure |    non-secure
         * -----------------------------------------------------------
         *      0       |      0      |  unchanged  |    non-secure
         *
         * where non-secure means that the value is right-shifted by one
         * and the MSB set, to fit in the non-secure priority range.
         *
         * In the first two cases, where ICC_PMR_EL1 and the interrupt
         * priority are both either modified or unchanged, we can use the
         * same set of priorities. In the last case, where only the
         * interrupt priorities are moved into the non-secure range, the
         * priorities programmed by software must be shifted to match what
         * ICC_RPR_EL1 will return (see GICD_INT_RPR_PRI()).
         */
        if (gic_has_group0() && !gic_dist_security_disabled())
                static_branch_enable(&gic_nonsecure_priorities);

        static_branch_enable(&supports_pseudo_nmis);

        if (static_branch_likely(&supports_deactivate_key))
                gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI;
        else
                gic_chip.flags |= IRQCHIP_SUPPORTS_NMI;
}

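/*
 * Common probe path for both DT and ACPI: read the distributor
 * capabilities, apply quirks, create the irq domain, then initialise
 * the distributor, this CPU's interface, the SMP/PM hooks and, where
 * supported, LPIs (ITS) or v2m MSIs, and finally pseudo-NMI support.
 */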
static int __init gic_init_bases(void __iomem *dist_base,
                                 struct redist_region *rdist_regs,
                                 u32 nr_redist_regions,
                                 u64 redist_stride,
                                 struct fwnode_handle *handle)
{
        u32 typer;
        int err;

        if (!is_hyp_mode_available())
                static_branch_disable(&supports_deactivate_key);

        if (static_branch_likely(&supports_deactivate_key))
                pr_info("GIC: Using split EOI/Deactivate mode\n");

        gic_data.fwnode = handle;
        gic_data.dist_base = dist_base;
        gic_data.redist_regions = rdist_regs;
        gic_data.nr_redist_regions = nr_redist_regions;
        gic_data.redist_stride = redist_stride;

        /*
         * Find out how many interrupts are supported.
         */
        typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
        gic_data.rdists.gicd_typer = typer;

        gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR),
                          gic_quirks, &gic_data);

        pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
        pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);

        /*
         * ThunderX1 explodes on reading GICD_TYPER2, in violation of the
         * architecture spec (which says that reserved registers are RES0).
         */
        if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539))
                gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);

        gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
                                                 &gic_data);
        gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
        gic_data.rdists.has_rvpeid = true;
        gic_data.rdists.has_vlpis = true;
        gic_data.rdists.has_direct_lpi = true;
        gic_data.rdists.has_vpend_valid_dirty = true;

        if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
                err = -ENOMEM;
                goto out_free;
        }

        irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);

        gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
        pr_info("Distributor has %sRange Selector support\n",
                gic_data.has_rss ? "" : "no ");

        if (typer & GICD_TYPER_MBIS) {
                err = mbi_init(handle, gic_data.domain);
                if (err)
                        pr_err("Failed to initialize MBIs\n");
        }

        set_handle_irq(gic_handle_irq);

        gic_update_rdist_properties();

        gic_dist_init();
        gic_cpu_init();
        gic_smp_init();
        gic_cpu_pm_init();

        if (gic_dist_supports_lpis()) {
                its_init(handle, &gic_data.rdists, gic_data.domain);
                its_cpu_init();
        } else {
                if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
                        gicv2m_init(handle, gic_data.domain);
        }

        gic_enable_nmi_support();

        return 0;

out_free:
        if (gic_data.domain)
                irq_domain_remove(gic_data.domain);
        free_percpu(gic_data.rdists.rdist);
        return err;
}

static int __init gic_validate_dist_version(void __iomem *dist_base)
{
        u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;

        if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
                return -ENODEV;

        return 0;
}

/* Create all possible partitions at boot time */
static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
{
        struct device_node *parts_node, *child_part;
        int part_idx = 0, i;
        int nr_parts;
        struct partition_affinity *parts;

        parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
        if (!parts_node)
                return;

        gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
        if (!gic_data.ppi_descs)
                return;

        nr_parts = of_get_child_count(parts_node);

        if (!nr_parts)
                goto out_put_node;

        parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
        if (WARN_ON(!parts))
                goto out_put_node;

        for_each_child_of_node(parts_node, child_part) {
                struct partition_affinity *part;
                int n;

                part = &parts[part_idx];

                part->partition_id = of_node_to_fwnode(child_part);

                pr_info("GIC: PPI partition %pOFn[%d] { ",
                        child_part, part_idx);

                n = of_property_count_elems_of_size(child_part, "affinity",
                                                    sizeof(u32));
                WARN_ON(n <= 0);

                for (i = 0; i < n; i++) {
                        int err, cpu;
                        u32 cpu_phandle;
                        struct device_node *cpu_node;

                        err = of_property_read_u32_index(child_part, "affinity",
                                                         i, &cpu_phandle);
                        if (WARN_ON(err))
                                continue;

                        cpu_node = of_find_node_by_phandle(cpu_phandle);
                        if (WARN_ON(!cpu_node))
                                continue;

                        cpu = of_cpu_node_to_id(cpu_node);
                        if (WARN_ON(cpu < 0))
                                continue;

                        pr_cont("%pOF[%d] ", cpu_node, cpu);

                        cpumask_set_cpu(cpu, &part->mask);
                }

                pr_cont("}\n");
                part_idx++;
        }

        for (i = 0; i < gic_data.ppi_nr; i++) {
                unsigned int irq;
                struct partition_desc *desc;
                struct irq_fwspec ppi_fwspec = {
                        .fwnode         = gic_data.fwnode,
                        .param_count    = 3,
                        .param          = {
                                [0]     = GIC_IRQ_TYPE_PARTITION,
                                [1]     = i,
                                [2]     = IRQ_TYPE_NONE,
                        },
                };

                irq = irq_create_fwspec_mapping(&ppi_fwspec);
                if (WARN_ON(!irq))
                        continue;
                desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
                                             irq, &partition_domain_ops);
                if (WARN_ON(!desc))
                        continue;

                gic_data.ppi_descs[i] = desc;
        }

out_put_node:
        of_node_put(parts_node);
}

static void __init gic_of_setup_kvm_info(struct device_node *node)
{
        int ret;
        struct resource r;
        u32 gicv_idx;

        gic_v3_kvm_info.type = GIC_V3;

        gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
        if (!gic_v3_kvm_info.maint_irq)
                return;

        if (of_property_read_u32(node, "#redistributor-regions",
                                 &gicv_idx))
                gicv_idx = 1;

        gicv_idx += 3;  /* Also skip GICD, GICC, GICH */
        ret = of_address_to_resource(node, gicv_idx, &r);
        if (!ret)
                gic_v3_kvm_info.vcpu = r;

        gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
        gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
        vgic_set_kvm_info(&gic_v3_kvm_info);
}

static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
        void __iomem *dist_base;
        struct redist_region *rdist_regs;
        u64 redist_stride;
        u32 nr_redist_regions;
        int err, i;

        dist_base = of_iomap(node, 0);
        if (!dist_base) {
                pr_err("%pOF: unable to map gic dist registers\n", node);
                return -ENXIO;
        }

        err = gic_validate_dist_version(dist_base);
        if (err) {
                pr_err("%pOF: no distributor detected, giving up\n", node);
                goto out_unmap_dist;
        }

        if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
                nr_redist_regions = 1;

        rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs),
                             GFP_KERNEL);
        if (!rdist_regs) {
                err = -ENOMEM;
                goto out_unmap_dist;
        }

        for (i = 0; i < nr_redist_regions; i++) {
                struct resource res;
                int ret;

                ret = of_address_to_resource(node, 1 + i, &res);
                rdist_regs[i].redist_base = of_iomap(node, 1 + i);
                if (ret || !rdist_regs[i].redist_base) {
                        pr_err("%pOF: couldn't map region %d\n", node, i);
                        err = -ENODEV;
                        goto out_unmap_rdist;
                }
                rdist_regs[i].phys_base = res.start;
        }

        if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
                redist_stride = 0;

        gic_enable_of_quirks(node, gic_quirks, &gic_data);

        err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
                             redist_stride, &node->fwnode);
        if (err)
                goto out_unmap_rdist;

        gic_populate_ppi_partitions(node);

        if (static_branch_likely(&supports_deactivate_key))
                gic_of_setup_kvm_info(node);
        return 0;

out_unmap_rdist:
        for (i = 0; i < nr_redist_regions; i++)
                if (rdist_regs[i].redist_base)
                        iounmap(rdist_regs[i].redist_base);
        kfree(rdist_regs);
out_unmap_dist:
        iounmap(dist_base);
        return err;
}

IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);

#ifdef CONFIG_ACPI
static struct
{
        void __iomem *dist_base;
        struct redist_region *redist_regs;
        u32 nr_redist_regions;
        bool single_redist;
        int enabled_rdists;
        u32 maint_irq;
        int maint_irq_mode;
        phys_addr_t vcpu_base;
} acpi_data __initdata;

static void __init
gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
{
        static int count = 0;

        acpi_data.redist_regs[count].phys_base = phys_base;
        acpi_data.redist_regs[count].redist_base = redist_base;
        acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
        count++;
}

static int __init
gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
                           const unsigned long end)
{
        struct acpi_madt_generic_redistributor *redist =
                        (struct acpi_madt_generic_redistributor *)header;
        void __iomem *redist_base;

        redist_base = ioremap(redist->base_address, redist->length);
        if (!redist_base) {
                pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
                return -ENOMEM;
        }

        gic_acpi_register_redist(redist->base_address, redist_base);
        return 0;
}

static int __init
gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
                         const unsigned long end)
{
        struct acpi_madt_generic_interrupt *gicc =
                                (struct acpi_madt_generic_interrupt *)header;
        u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
        u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
        void __iomem *redist_base;

        /* Skip GICC entries that are not marked enabled */
        if (!(gicc->flags & ACPI_MADT_ENABLED))
                return 0;

        redist_base = ioremap(gicc->gicr_base_address, size);
        if (!redist_base)
                return -ENOMEM;

        gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
        return 0;
}

static int __init gic_acpi_collect_gicr_base(void)
{
        acpi_tbl_entry_handler redist_parser;
        enum acpi_madt_type type;

        if (acpi_data.single_redist) {
                type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
                redist_parser = gic_acpi_parse_madt_gicc;
        } else {
                type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
                redist_parser = gic_acpi_parse_madt_redist;
        }

        /* Collect redistributor base addresses from the MADT */
        if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
                return 0;

        pr_info("No valid GICR entries exist\n");
        return -ENODEV;
}

static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
                                      const unsigned long end)
{
        /* Subtable presence means that the redistributor exists, that's it */
        return 0;
}

static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
                                      const unsigned long end)
{
        struct acpi_madt_generic_interrupt *gicc =
                                (struct acpi_madt_generic_interrupt *)header;

        /*
         * If GICC is enabled and has a valid gicr base address, then it
         * means GICR base is presented via GICC.
         */
        if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
                acpi_data.enabled_rdists++;
                return 0;
        }

        /*
         * It's perfectly valid for firmware to pass a disabled GICC entry;
         * don't treat it as an error, skip the entry instead of failing
         * the probe.
         */
        if (!(gicc->flags & ACPI_MADT_ENABLED))
                return 0;

        return -ENODEV;
}

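/*
 * Count the redistributor regions described by the MADT. Firmware must
 * describe them either entirely via GICR subtables or entirely via the
 * GICR base address of each GICC subtable; mixing the two is not
 * allowed.
 */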
static int __init gic_acpi_count_gicr_regions(void)
{
        int count;

        count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
                                      gic_acpi_match_gicr, 0);
        if (count > 0) {
                acpi_data.single_redist = false;
                return count;
        }

        count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
                                      gic_acpi_match_gicc, 0);
        if (count > 0) {
                acpi_data.single_redist = true;
                count = acpi_data.enabled_rdists;
        }

        return count;
}

static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
                                           struct acpi_probe_entry *ape)
{
        struct acpi_madt_generic_distributor *dist;
        int count;

        dist = (struct acpi_madt_generic_distributor *)header;
        if (dist->version != ape->driver_data)
                return false;

        /* We need to do that exercise anyway, the sooner the better */
        count = gic_acpi_count_gicr_regions();
        if (count <= 0)
                return false;

        acpi_data.nr_redist_regions = count;
        return true;
}

static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header,
                                                const unsigned long end)
{
        struct acpi_madt_generic_interrupt *gicc =
                (struct acpi_madt_generic_interrupt *)header;
        int maint_irq_mode;
        static int first_madt = true;

        /* Skip unusable CPUs */
        if (!(gicc->flags & ACPI_MADT_ENABLED))
                return 0;

        maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
                ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;

        if (first_madt) {
                first_madt = false;

                acpi_data.maint_irq = gicc->vgic_interrupt;
                acpi_data.maint_irq_mode = maint_irq_mode;
                acpi_data.vcpu_base = gicc->gicv_base_address;

                return 0;
        }

        /*
         * The maintenance interrupt and GICV must be the same for every CPU.
         */
        if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
            (acpi_data.maint_irq_mode != maint_irq_mode) ||
            (acpi_data.vcpu_base != gicc->gicv_base_address))
                return -EINVAL;

        return 0;
}

static bool __init gic_acpi_collect_virt_info(void)
{
        int count;

        count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
                                      gic_acpi_parse_virt_madt_gicc, 0);

        return (count > 0);
}

#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
#define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K)
#define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K)

static void __init gic_acpi_setup_kvm_info(void)
{
        int irq;

        if (!gic_acpi_collect_virt_info()) {
                pr_warn("Unable to get hardware information used for virtualization\n");
                return;
        }

        gic_v3_kvm_info.type = GIC_V3;

        irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
                                acpi_data.maint_irq_mode,
                                ACPI_ACTIVE_HIGH);
        if (irq <= 0)
                return;

        gic_v3_kvm_info.maint_irq = irq;

        if (acpi_data.vcpu_base) {
                struct resource *vcpu = &gic_v3_kvm_info.vcpu;

                vcpu->flags = IORESOURCE_MEM;
                vcpu->start = acpi_data.vcpu_base;
                vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
        }

        gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
        gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
        vgic_set_kvm_info(&gic_v3_kvm_info);
}

static int __init
gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
{
        struct acpi_madt_generic_distributor *dist;
        struct fwnode_handle *domain_handle;
        size_t size;
        int i, err;

        /* Get distributor base address */
        dist = (struct acpi_madt_generic_distributor *)header;
        acpi_data.dist_base = ioremap(dist->base_address,
                                      ACPI_GICV3_DIST_MEM_SIZE);
        if (!acpi_data.dist_base) {
                pr_err("Unable to map GICD registers\n");
                return -ENOMEM;
        }

        err = gic_validate_dist_version(acpi_data.dist_base);
        if (err) {
                pr_err("No distributor detected at @%p, giving up\n",
                       acpi_data.dist_base);
                goto out_dist_unmap;
        }

        size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
        acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
        if (!acpi_data.redist_regs) {
                err = -ENOMEM;
                goto out_dist_unmap;
        }

        err = gic_acpi_collect_gicr_base();
        if (err)
                goto out_redist_unmap;

        domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
        if (!domain_handle) {
                err = -ENOMEM;
                goto out_redist_unmap;
        }

        err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
                             acpi_data.nr_redist_regions, 0, domain_handle);
        if (err)
                goto out_fwhandle_free;

        acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);

        if (static_branch_likely(&supports_deactivate_key))
                gic_acpi_setup_kvm_info();

        return 0;

out_fwhandle_free:
        irq_domain_free_fwnode(domain_handle);
out_redist_unmap:
        for (i = 0; i < acpi_data.nr_redist_regions; i++)
                if (acpi_data.redist_regs[i].redist_base)
                        iounmap(acpi_data.redist_regs[i].redist_base);
        kfree(acpi_data.redist_regs);
out_dist_unmap:
        iounmap(acpi_data.dist_base);
        return err;
}
IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
                     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
                     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
                     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
                     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
                     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
                     gic_acpi_init);
#endif