1
2
3
4
5
6
7#define pr_fmt(fmt) "GICv3: " fmt
8
9#include <linux/acpi.h>
10#include <linux/cpu.h>
11#include <linux/cpu_pm.h>
12#include <linux/delay.h>
13#include <linux/interrupt.h>
14#include <linux/irqdomain.h>
15#include <linux/of.h>
16#include <linux/of_address.h>
17#include <linux/of_irq.h>
18#include <linux/percpu.h>
19#include <linux/refcount.h>
20#include <linux/slab.h>
21
22#include <linux/irqchip.h>
23#include <linux/irqchip/arm-gic-common.h>
24#include <linux/irqchip/arm-gic-v3.h>
25#include <linux/irqchip/irq-partition-percpu.h>
26
27#include <asm/cputype.h>
28#include <asm/exception.h>
29#include <asm/smp_plat.h>
30#include <asm/virt.h>
31
32#include "irq-gic-common.h"
33
34#define GICD_INT_NMI_PRI (GICD_INT_DEF_PRI & ~0x80)
35
36#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
37#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1)
38
39#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
40
/* One redistributor region as discovered from DT/ACPI. */
struct redist_region {
	void __iomem *redist_base;	/* virtual mapping of the region */
	phys_addr_t phys_base;		/* physical base address */
	bool single_redist;		/* region contains exactly one redistributor */
};
46
/* Global driver state; a single GICv3 instance is supported (see gic_data). */
struct gic_chip_data {
	struct fwnode_handle *fwnode;		/* firmware node the domain is keyed on */
	void __iomem *dist_base;		/* distributor (GICD) mapping */
	struct redist_region *redist_regions;	/* array of nr_redist_regions entries */
	struct rdists rdists;			/* per-CPU redistributor bookkeeping */
	struct irq_domain *domain;
	u64 redist_stride;			/* 0 = use architectural stride */
	u32 nr_redist_regions;
	u64 flags;				/* FLAGS_WORKAROUND_* quirk bits */
	bool has_rss;				/* GICD supports Range Selector SGIs */
	unsigned int ppi_nr;			/* min PPI count over all redistributors */
	struct partition_desc **ppi_descs;	/* per-PPI partition domains, may be NULL */
};
60
61static struct gic_chip_data gic_data __read_mostly;
62static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
63
64#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
65#define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
66#define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
90
91
92
93
94
95
96
97DEFINE_STATIC_KEY_FALSE(gic_pmr_sync);
98EXPORT_SYMBOL(gic_pmr_sync);
99
100DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
101EXPORT_SYMBOL(gic_nonsecure_priorities);
102
103
104static refcount_t *ppi_nmi_refs;
105
106static struct gic_kvm_info gic_v3_kvm_info;
107static DEFINE_PER_CPU(bool, has_rss);
108
109#define MPIDR_RS(mpidr) (((mpidr) & 0xF0UL) >> 4)
110#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist))
111#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
112#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)
113
114
115#define DEFAULT_PMR_VALUE 0xf0
116
117enum gic_intid_range {
118 SGI_RANGE,
119 PPI_RANGE,
120 SPI_RANGE,
121 EPPI_RANGE,
122 ESPI_RANGE,
123 LPI_RANGE,
124 __INVALID_RANGE__
125};
126
127static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
128{
129 switch (hwirq) {
130 case 0 ... 15:
131 return SGI_RANGE;
132 case 16 ... 31:
133 return PPI_RANGE;
134 case 32 ... 1019:
135 return SPI_RANGE;
136 case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63):
137 return EPPI_RANGE;
138 case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023):
139 return ESPI_RANGE;
140 case 8192 ... GENMASK(23, 0):
141 return LPI_RANGE;
142 default:
143 return __INVALID_RANGE__;
144 }
145}
146
/* Classify an irq_data's hwirq into its INTID range. */
static enum gic_intid_range get_intid_range(struct irq_data *d)
{
	return __get_intid_range(d->hwirq);
}
151
/* Raw hardware INTID for this interrupt. */
static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}
156
157static inline bool gic_irq_in_rdist(struct irq_data *d)
158{
159 switch (get_intid_range(d)) {
160 case SGI_RANGE:
161 case PPI_RANGE:
162 case EPPI_RANGE:
163 return true;
164 default:
165 return false;
166 }
167}
168
/*
 * Base address of the register frame controlling this interrupt:
 * the local redistributor SGI frame for per-CPU interrupts, the
 * distributor for shared ones, NULL for anything else (e.g. LPIs).
 */
static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	switch (get_intid_range(d)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case EPPI_RANGE:
		/* SGI+PPI -> SGI_base for this CPU */
		return gic_data_rdist_sgi_base();

	case SPI_RANGE:
	case ESPI_RANGE:
		/* SPI -> dist_base */
		return gic_data.dist_base;

	default:
		return NULL;
	}
}
187
/*
 * Spin until the Register Write Pending bit clears, i.e. until previous
 * writes to the (re)distributor have taken effect. Gives up after ~1s
 * with a ratelimited error rather than hanging the CPU forever.
 */
static void gic_do_wait_for_rwp(void __iomem *base)
{
	u32 count = 1000000;	/* 1s */

	while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
		count--;
		if (!count) {
			pr_err_ratelimited("RWP timeout, gone fishing\n");
			return;
		}
		cpu_relax();
		udelay(1);
	}
}
202
203
/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data.dist_base);
}
208
209
/* Wait for completion of a redistributor change on the current CPU */
static void gic_redist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data_rdist_rd_base());
}
214
215#ifdef CONFIG_ARM64
216
/*
 * Read ICC_IAR1_EL1, routing through the Cavium ThunderX erratum 23154
 * workaround accessor when that CPU bug is present.
 */
static u64 __maybe_unused gic_read_iar(void)
{
	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
		return gic_read_iar_cavium_thunderx();
	else
		return gic_read_iar_common();
}
224#endif
225
/*
 * Wake up (enable=true) or put to sleep (enable=false) this CPU's
 * redistributor via GICR_WAKER, then poll ChildrenAsleep until the
 * state change is acknowledged. Skipped entirely on MSM8996, whose
 * secure firmware owns GICR_WAKER (FLAGS_WORKAROUND_GICR_WAKER_MSM8996).
 */
static void gic_enable_redist(bool enable)
{
	void __iomem *rbase;
	u32 count = 1000000;	/* 1s */
	u32 val;

	if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
		return;

	rbase = gic_data_rdist_rd_base();

	val = readl_relaxed(rbase + GICR_WAKER);
	if (enable)
		/* Wake up this CPU redistributor */
		val &= ~GICR_WAKER_ProcessorSleep;
	else
		val |= GICR_WAKER_ProcessorSleep;
	writel_relaxed(val, rbase + GICR_WAKER);

	if (!enable) {
		/* ProcessorSleep may be RAZ/WI; nothing to poll for then. */
		val = readl_relaxed(rbase + GICR_WAKER);
		if (!(val & GICR_WAKER_ProcessorSleep))
			return;
	}

	while (--count) {
		val = readl_relaxed(rbase + GICR_WAKER);
		if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
			break;
		cpu_relax();
		udelay(1);
	}
	if (!count)
		pr_err_ratelimited("redistributor failed to %s...\n",
				   enable ? "wakeup" : "sleep");
}
262
263
264
265
/*
 * Translate a (classic register offset, hwirq) pair into the register
 * offset and bit/byte index actually used for this interrupt: extended
 * PPIs reuse the classic redistributor registers at a shifted index,
 * while extended SPIs live in their own GICD_*nE register blocks.
 */
static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
{
	switch (get_intid_range(d)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case SPI_RANGE:
		*index = d->hwirq;
		return offset;
	case EPPI_RANGE:
		/*
		 * Contrary to the ESPI range, the EPPI range is contiguous
		 * to the PPI range in the registers, so let's adjust the
		 * displacement accordingly. Consistency is overrated.
		 */
		*index = d->hwirq - EPPI_BASE_INTID + 32;
		return offset;
	case ESPI_RANGE:
		*index = d->hwirq - ESPI_BASE_INTID;
		switch (offset) {
		case GICD_ISENABLER:
			return GICD_ISENABLERnE;
		case GICD_ICENABLER:
			return GICD_ICENABLERnE;
		case GICD_ISPENDR:
			return GICD_ISPENDRnE;
		case GICD_ICPENDR:
			return GICD_ICPENDRnE;
		case GICD_ISACTIVER:
			return GICD_ISACTIVERnE;
		case GICD_ICACTIVER:
			return GICD_ICACTIVERnE;
		case GICD_IPRIORITYR:
			return GICD_IPRIORITYRnE;
		case GICD_ICFGR:
			return GICD_ICFGRnE;
		case GICD_IROUTER:
			return GICD_IROUTERnE;
		default:
			break;
		}
		break;
	default:
		break;
	}

	/* Unexpected range/offset combination: fall back to the raw values. */
	WARN_ON(1);
	*index = d->hwirq;
	return offset;
}
315
/*
 * Read the single state bit for this interrupt from a 1-bit-per-IRQ
 * register bank (ISENABLER/ISPENDR/...); returns 0 or 1.
 */
static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	void __iomem *base;
	u32 index, mask;

	offset = convert_offset_index(d, offset, &index);
	mask = 1 << (index % 32);

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_data.dist_base;

	return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask);
}
331
/*
 * Write the single bit for this interrupt in a write-1-to-act register
 * bank (ISENABLER/ICENABLER/...), then wait for the write to take effect
 * on the appropriate (re)distributor.
 */
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	void (*rwp_wait)(void);
	void __iomem *base;
	u32 index, mask;

	offset = convert_offset_index(d, offset, &index);
	mask = 1 << (index % 32);

	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	writel_relaxed(mask, base + offset + (index / 32) * 4);
	rwp_wait();
}
352
/* Disable forwarding of this interrupt (write-1 to ICENABLER). */
static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ICENABLER);
}
357
static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * noone to deactivate it (guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GICD_ICACTIVER);
}
372
/* Enable forwarding of this interrupt (write-1 to ISENABLER). */
static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ISENABLER);
}
377
/* True when pseudo-NMIs are both compiled in and detected at boot. */
static inline bool gic_supports_nmi(void)
{
	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
	       static_branch_likely(&supports_pseudo_nmis);
}
383
384static int gic_irq_set_irqchip_state(struct irq_data *d,
385 enum irqchip_irq_state which, bool val)
386{
387 u32 reg;
388
389 if (d->hwirq >= 8192)
390 return -EINVAL;
391
392 switch (which) {
393 case IRQCHIP_STATE_PENDING:
394 reg = val ? GICD_ISPENDR : GICD_ICPENDR;
395 break;
396
397 case IRQCHIP_STATE_ACTIVE:
398 reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
399 break;
400
401 case IRQCHIP_STATE_MASKED:
402 reg = val ? GICD_ICENABLER : GICD_ISENABLER;
403 break;
404
405 default:
406 return -EINVAL;
407 }
408
409 gic_poke_irq(d, reg);
410 return 0;
411}
412
413static int gic_irq_get_irqchip_state(struct irq_data *d,
414 enum irqchip_irq_state which, bool *val)
415{
416 if (d->hwirq >= 8192)
417 return -EINVAL;
418
419 switch (which) {
420 case IRQCHIP_STATE_PENDING:
421 *val = gic_peek_irq(d, GICD_ISPENDR);
422 break;
423
424 case IRQCHIP_STATE_ACTIVE:
425 *val = gic_peek_irq(d, GICD_ISACTIVER);
426 break;
427
428 case IRQCHIP_STATE_MASKED:
429 *val = !gic_peek_irq(d, GICD_ISENABLER);
430 break;
431
432 default:
433 return -EINVAL;
434 }
435
436 return 0;
437}
438
/*
 * Program the 8-bit priority of this interrupt; GICD_IPRIORITYR is
 * byte-addressable, so the index is used directly as a byte offset.
 */
static void gic_irq_set_prio(struct irq_data *d, u8 prio)
{
	void __iomem *base = gic_dist_base(d);
	u32 offset, index;

	offset = convert_offset_index(d, GICD_IPRIORITYR, &index);

	writeb_relaxed(prio, base + offset + index);
}
448
449static u32 gic_get_ppi_index(struct irq_data *d)
450{
451 switch (get_intid_range(d)) {
452 case PPI_RANGE:
453 return d->hwirq - 16;
454 case EPPI_RANGE:
455 return d->hwirq - EPPI_BASE_INTID + 16;
456 default:
457 unreachable();
458 }
459}
460
/*
 * irq_chip::irq_nmi_setup callback: promote an interrupt to pseudo-NMI
 * by raising its priority to GICD_INT_NMI_PRI and switching its flow
 * handler. The interrupt must be disabled while its property is changed.
 * PPIs share a refcount so the handler is only swapped on first setup.
 */
static int gic_irq_nmi_setup(struct irq_data *d)
{
	struct irq_desc *desc = irq_to_desc(d->irq);

	if (!gic_supports_nmi())
		return -EINVAL;

	if (gic_peek_irq(d, GICD_ISENABLER)) {
		pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
		return -EINVAL;
	}

	/*
	 * A secondary irq_chip should be in charge of LPI request,
	 * it should not be possible to get there
	 */
	if (WARN_ON(gic_irq(d) >= 8192))
		return -EINVAL;

	/* desc lock should already be held */
	if (gic_irq_in_rdist(d)) {
		u32 idx = gic_get_ppi_index(d);

		/* Setting up PPI as NMI, only switch handler for first NMI */
		if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) {
			refcount_set(&ppi_nmi_refs[idx], 1);
			desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
		}
	} else {
		desc->handle_irq = handle_fasteoi_nmi;
	}

	gic_irq_set_prio(d, GICD_INT_NMI_PRI);

	return 0;
}
497
/*
 * irq_chip::irq_nmi_teardown callback: undo gic_irq_nmi_setup(),
 * restoring the default priority and flow handler. For PPIs the
 * normal handler is only restored when the last user goes away.
 */
static void gic_irq_nmi_teardown(struct irq_data *d)
{
	struct irq_desc *desc = irq_to_desc(d->irq);

	if (WARN_ON(!gic_supports_nmi()))
		return;

	if (gic_peek_irq(d, GICD_ISENABLER)) {
		pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
		return;
	}

	/*
	 * A secondary irq_chip should be in charge of LPI request,
	 * it should not be possible to get there
	 */
	if (WARN_ON(gic_irq(d) >= 8192))
		return;

	/* desc lock should already be held */
	if (gic_irq_in_rdist(d)) {
		u32 idx = gic_get_ppi_index(d);

		/* Tearing down NMI, only switch handler for last NMI */
		if (refcount_dec_and_test(&ppi_nmi_refs[idx]))
			desc->handle_irq = handle_percpu_devid_irq;
	} else {
		desc->handle_irq = handle_fasteoi_irq;
	}

	gic_irq_set_prio(d, GICD_INT_DEF_PRI);
}
530
/* EOImode 0: a single EOI both drops priority and deactivates. */
static void gic_eoi_irq(struct irq_data *d)
{
	gic_write_eoir(gic_irq(d));
}
535
static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	/*
	 * No need to deactivate an LPI, or an interrupt that
	 * is is getting forwarded to a vcpu.
	 */
	if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
		return;
	gic_write_dir(gic_irq(d));
}
546
/*
 * irq_chip::irq_set_type callback: program edge/level trigger mode in
 * GICD_ICFGR. SGIs are always edge-rising; (E)SPIs accept only
 * level-high or edge-rising. A failure on a (E)PPI is tolerated, as
 * secure firmware may own its configuration.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	enum gic_intid_range range;
	unsigned int irq = gic_irq(d);
	void (*rwp_wait)(void);
	void __iomem *base;
	u32 offset, index;
	int ret;

	range = get_intid_range(d);

	/* Interrupt configuration for SGIs can't be changed */
	if (range == SGI_RANGE)
		return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;

	/* SPIs have restrictions on the supported types */
	if ((range == SPI_RANGE || range == ESPI_RANGE) &&
	    type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	offset = convert_offset_index(d, GICD_ICFGR, &index);

	ret = gic_configure_irq(index, type, base + offset, rwp_wait);
	if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
		/* Misconfigured PPIs are usually not fatal */
		pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq);
		ret = 0;
	}

	return ret;
}
586
587static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
588{
589 if (get_intid_range(d) == SGI_RANGE)
590 return -EINVAL;
591
592 if (vcpu)
593 irqd_set_forwarded_to_vcpu(d);
594 else
595 irqd_clr_forwarded_to_vcpu(d);
596 return 0;
597}
598
599static u64 gic_mpidr_to_affinity(unsigned long mpidr)
600{
601 u64 aff;
602
603 aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
604 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
605 MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
606 MPIDR_AFFINITY_LEVEL(mpidr, 0));
607
608 return aff;
609}
610
/*
 * Retire an interrupt that was acknowledged but not handled: in
 * EOImode 1 (deactivate key set) priority was already dropped at ack
 * time, so only deactivate (never for LPIs, which have no active
 * state); in EOImode 0 a single EOI does both.
 */
static void gic_deactivate_unhandled(u32 irqnr)
{
	if (static_branch_likely(&supports_deactivate_key)) {
		if (irqnr < 8192)
			gic_write_dir(irqnr);
	} else {
		gic_write_eoir(irqnr);
	}
}
620
/*
 * Pseudo-NMI entry path. Enters NMI context only if interrupts were
 * enabled when we took the exception (otherwise we are already in a
 * suitable context). In EOImode 1 the priority drop is done up front
 * so further NMIs can preempt the handler.
 */
static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
{
	bool irqs_enabled = interrupts_enabled(regs);
	int err;

	if (irqs_enabled)
		nmi_enter();

	if (static_branch_likely(&supports_deactivate_key))
		gic_write_eoir(irqnr);
	/*
	 * Leave the PSR.I bit set to prevent other NMIs to be
	 * received while handling this one.
	 * PSR.I will be restored when we ERET to the
	 * interrupted context.
	 */
	err = handle_domain_nmi(gic_data.domain, irqnr, regs);
	if (err)
		gic_deactivate_unhandled(irqnr);

	if (irqs_enabled)
		nmi_exit();
}
644
/*
 * Main IRQ exception entry: acknowledge the interrupt, divert to the
 * NMI path when the running priority says this is a pseudo-NMI, drop
 * spurious INTIDs (1020-1023), then dispatch into the irq domain.
 */
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqnr;

	irqnr = gic_read_iar();

	if (gic_supports_nmi() &&
	    unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) {
		gic_handle_nmi(irqnr, regs);
		return;
	}

	if (gic_prio_masking_enabled()) {
		gic_pmr_mask_irqs();
		gic_arch_enable_irqs();
	}

	/* Check for special IDs first */
	if ((irqnr >= 1020 && irqnr <= 1023))
		return;

	if (static_branch_likely(&supports_deactivate_key))
		gic_write_eoir(irqnr);
	else
		isb();

	if (handle_domain_irq(gic_data.domain, irqnr, regs)) {
		WARN_ONCE(true, "Unexpected interrupt received!\n");
		gic_deactivate_unhandled(irqnr);
	}
}
676
677static u32 gic_get_pribits(void)
678{
679 u32 pribits;
680
681 pribits = gic_read_ctlr();
682 pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
683 pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
684 pribits++;
685
686 return pribits;
687}
688
/*
 * Probe whether Group0 interrupts are accessible from the non-secure
 * side, by writing the lowest Group0-only priority to ICC_PMR_EL1 and
 * seeing if it sticks (it reads back as zero when Group0 is secure).
 * The original PMR is restored before returning.
 */
static bool gic_has_group0(void)
{
	u32 val;
	u32 old_pmr;

	old_pmr = gic_read_pmr();

	/*
	 * Let's find out if Group0 is under control of EL3 or not by
	 * setting the highest possible, non-zero priority in PMR.
	 *
	 * If SCR_EL3.FIQ is set, the priority gets shifted down in
	 * order for the CPU interface to set bit 7, and keep the
	 * actual priority in the non-secure range. In the process, it
	 * looses the least significant bit and the actual priority
	 * becomes 0x80. Reading it back returns 0, indicating that
	 * we're don't have access to Group0.
	 */
	gic_write_pmr(BIT(8 - gic_get_pribits()));
	val = gic_read_pmr();

	gic_write_pmr(old_pmr);

	return val != 0;
}
714
/*
 * One-time distributor initialisation: disable it, put all (E)SPIs in
 * Group1 with default configuration/priority, enable affinity routing
 * and Group1 delivery, and route every (E)SPI to the boot CPU.
 */
static void __init gic_dist_init(void)
{
	unsigned int i;
	u64 affinity;
	void __iomem *base = gic_data.dist_base;
	u32 val;

	/* Disable the distributor */
	writel_relaxed(0, base + GICD_CTLR);
	gic_dist_wait_for_rwp();

	/*
	 * Configure SPIs as non-secure Group-1. This will only matter
	 * if the GIC only has a single security state. This will not
	 * do the right thing if the kernel is running in secure mode,
	 * but that's not the intended use case anyway.
	 */
	for (i = 32; i < GIC_LINE_NR; i += 32)
		writel_relaxed(~0, base + GICD_IGROUPR + i / 8);

	/* Extended SPI range, not handled by the GICv2/GICv3 common code */
	for (i = 0; i < GIC_ESPI_NR; i += 32) {
		writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
		writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
	}

	for (i = 0; i < GIC_ESPI_NR; i += 32)
		writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);

	for (i = 0; i < GIC_ESPI_NR; i += 16)
		writel_relaxed(0, base + GICD_ICFGRnE + i / 4);

	for (i = 0; i < GIC_ESPI_NR; i += 4)
		writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i);

	/* Now do the common stuff, and wait for the distributor to drain */
	gic_dist_config(base, GIC_LINE_NR, gic_dist_wait_for_rwp);

	val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
	if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
		pr_info("Enabling SGIs without active state\n");
		val |= GICD_CTLR_nASSGIreq;
	}

	/* Enable distributor with ARE, Group1 */
	writel_relaxed(val, base + GICD_CTLR);

	/*
	 * Set all global interrupts to the boot CPU only. ARE must be
	 * enabled.
	 */
	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
	for (i = 32; i < GIC_LINE_NR; i++)
		gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);

	for (i = 0; i < GIC_ESPI_NR; i++)
		gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
}
773
/*
 * Walk every redistributor frame in every region, calling fn() on each
 * until it returns 0 (success). Within a region, frames are spaced by
 * redist_stride when provided, otherwise 2x64K (plus 2x64K more when
 * VLPIs are implemented), ending at the frame flagged GICR_TYPER.Last.
 * Returns 0 if fn() succeeded somewhere, -ENODEV otherwise.
 */
static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
{
	int ret = -ENODEV;
	int i;

	for (i = 0; i < gic_data.nr_redist_regions; i++) {
		void __iomem *ptr = gic_data.redist_regions[i].redist_base;
		u64 typer;
		u32 reg;

		reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
		if (reg != GIC_PIDR2_ARCH_GICv3 &&
		    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
			pr_warn("No redistributor present @%p\n", ptr);
			break;
		}

		do {
			typer = gic_read_typer(ptr + GICR_TYPER);
			ret = fn(gic_data.redist_regions + i, ptr);
			if (!ret)
				return 0;

			if (gic_data.redist_regions[i].single_redist)
				break;

			if (gic_data.redist_stride) {
				ptr += gic_data.redist_stride;
			} else {
				ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
				if (typer & GICR_TYPER_VLPIS)
					ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
			}
		} while (!(typer & GICR_TYPER_LAST));
	}

	return ret ? -ENODEV : 0;
}
812
/*
 * gic_iterate_rdists() callback: claim the redistributor frame whose
 * GICR_TYPER affinity matches the calling CPU's MPIDR. Returns 0 on a
 * match (stops the iteration), 1 to continue searching.
 */
static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
{
	unsigned long mpidr = cpu_logical_map(smp_processor_id());
	u64 typer;
	u32 aff;

	/*
	 * Convert affinity to a 32bit value that can be matched to
	 * GICR_TYPER bits [63:32].
	 */
	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	typer = gic_read_typer(ptr + GICR_TYPER);
	if ((typer >> 32) == aff) {
		u64 offset = ptr - region->redist_base;
		raw_spin_lock_init(&gic_data_rdist()->rd_lock);
		gic_data_rdist_rd_base() = ptr;
		gic_data_rdist()->phys_base = region->phys_base + offset;

		pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
			smp_processor_id(), mpidr,
			(int)(region - gic_data.redist_regions),
			&gic_data_rdist()->phys_base);
		return 0;
	}

	/* Try next one */
	return 1;
}
845
846static int gic_populate_rdist(void)
847{
848 if (gic_iterate_rdists(__gic_populate_rdist) == 0)
849 return 0;
850
851
852 WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
853 smp_processor_id(),
854 (unsigned long)cpu_logical_map(smp_processor_id()));
855 return -ENODEV;
856}
857
/*
 * gic_iterate_rdists() callback: AND-accumulate feature bits across all
 * redistributors (a feature is only usable if every redistributor has
 * it) and track the minimum PPI count. Always returns 1 so the whole
 * system is visited.
 */
static int __gic_update_rdist_properties(struct redist_region *region,
					 void __iomem *ptr)
{
	u64 typer = gic_read_typer(ptr + GICR_TYPER);

	gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);

	/* RVPEID implies some form of DirectLPI, no matter what the doc says... :-/ */
	gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
	gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
					   gic_data.rdists.has_rvpeid);
	gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);

	/* Detect non-sensical configurations */
	if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
		gic_data.rdists.has_direct_lpi = false;
		gic_data.rdists.has_vlpis = false;
		gic_data.rdists.has_rvpeid = false;
	}

	gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);

	return 1;
}
882
/*
 * Scan all redistributors to settle the system-wide PPI count and
 * GICv4 feature set, then log the result.
 */
static void gic_update_rdist_properties(void)
{
	gic_data.ppi_nr = UINT_MAX;
	gic_iterate_rdists(__gic_update_rdist_properties);
	if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
		gic_data.ppi_nr = 0;
	pr_info("%d PPIs implemented\n", gic_data.ppi_nr);
	if (gic_data.rdists.has_vlpis)
		pr_info("GICv4 features: %s%s%s\n",
			gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
			gic_data.rdists.has_rvpeid ? "RVPEID " : "",
			gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
}
896
897
/* True when the GIC has a single security state (GICD_CTLR.DS set). */
static inline bool gic_dist_security_disabled(void)
{
	return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
}
902
/*
 * Per-CPU GIC system-register setup: enable the system register
 * interface (SRE), program PMR/BPR, select the EOI mode, clear the
 * active-priority registers, enable Group1 delivery and check SGI
 * Range-Selector support against all other online CPUs.
 * Also called on CPU_PM_EXIT to restore state after a power cycle.
 */
static void gic_cpu_sys_reg_init(void)
{
	int i, cpu = smp_processor_id();
	u64 mpidr = cpu_logical_map(cpu);
	u64 need_rss = MPIDR_RS(mpidr);
	bool group0;
	u32 pribits;

	/*
	 * Need to check that the SRE bit has actually been set. If
	 * not, it means that SRE is disabled at EL2. We're going to
	 * die painfully, and there is nothing we can do about it.
	 *
	 * Kindly inform the luser.
	 */
	if (!gic_enable_sre())
		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");

	pribits = gic_get_pribits();

	group0 = gic_has_group0();

	/* Set priority mask register */
	if (!gic_prio_masking_enabled()) {
		write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
	} else if (gic_supports_nmi()) {
		/*
		 * Mismatch configuration with boot CPU, the system is likely
		 * to die as interrupt masking will not work properly on all
		 * CPUs
		 *
		 * The boot CPU calls this function before enabling NMI support,
		 * and as a result we'll never see this warning in the boot path
		 * for that CPU.
		 */
		if (static_branch_unlikely(&gic_nonsecure_priorities))
			WARN_ON(!group0 || gic_dist_security_disabled());
		else
			WARN_ON(group0 && !gic_dist_security_disabled());
	}

	/*
	 * Some firmwares hand over to the kernel with the BPR changed from
	 * its reset value (and with a value large enough to prevent
	 * any pre-emptive interrupts from working at all). Writing a zero
	 * to BPR restores is reset value.
	 */
	gic_write_bpr1(0);

	if (static_branch_likely(&supports_deactivate_key)) {
		/* EOI drops priority only (mode 1) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
	} else {
		/* EOI deactivates interrupt too (mode 0) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
	}

	/* Always whack Group0 before Group1 */
	if (group0) {
		switch(pribits) {
		case 8:
		case 7:
			write_gicreg(0, ICC_AP0R3_EL1);
			write_gicreg(0, ICC_AP0R2_EL1);
			fallthrough;
		case 6:
			write_gicreg(0, ICC_AP0R1_EL1);
			fallthrough;
		case 5:
		case 4:
			write_gicreg(0, ICC_AP0R0_EL1);
		}

		isb();
	}

	switch(pribits) {
	case 8:
	case 7:
		write_gicreg(0, ICC_AP1R3_EL1);
		write_gicreg(0, ICC_AP1R2_EL1);
		fallthrough;
	case 6:
		write_gicreg(0, ICC_AP1R1_EL1);
		fallthrough;
	case 5:
	case 4:
		write_gicreg(0, ICC_AP1R0_EL1);
	}

	isb();

	/* ... and let's hit the road... */
	gic_write_grpen1(1);

	/* Keep the RSS capability status in per_cpu variable */
	per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);

	/* Check all the CPUs have capable of sending SGIs to other CPUs */
	for_each_online_cpu(i) {
		bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);

		need_rss |= MPIDR_RS(cpu_logical_map(i));
		if (need_rss && (!have_rss))
			pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
				cpu, (unsigned long)mpidr,
				i, (unsigned long)cpu_logical_map(i));
	}

	/*
	 * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
	 * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
	 * UNPREDICTABLE choice of :
	 *   - The write is ignored.
	 *   - The RS field is treated as 0.
	 */
	if (need_rss && (!gic_data.has_rss))
		pr_crit_once("RSS is required but GICD doesn't support it\n");
}
1022
1023static bool gicv3_nolpi;
1024
/* Parse the "irqchip.gicv3_nolpi" boot parameter. */
static int __init gicv3_nolpi_cfg(char *buf)
{
	return strtobool(buf, &gicv3_nolpi);
}
1029early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);
1030
/* LPIs are usable when the ITS driver is built, the hardware
 * advertises them and they were not disabled on the command line. */
static int gic_dist_supports_lpis(void)
{
	return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
		!!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
		!gicv3_nolpi);
}
1037
/*
 * Per-CPU bring-up: find this CPU's redistributor, wake it, configure
 * SGI/PPI grouping and defaults, then initialise the CPU interface
 * system registers. Bails out silently if no redistributor matches
 * (gic_populate_rdist already WARNed).
 */
static void gic_cpu_init(void)
{
	void __iomem *rbase;
	int i;

	/* Register ourselves with the rest of the world */
	if (gic_populate_rdist())
		return;

	gic_enable_redist(true);

	WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) &&
	     !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange),
	     "Distributor has extended ranges, but CPU%d doesn't\n",
	     smp_processor_id());

	rbase = gic_data_rdist_sgi_base();

	/* Configure SGIs/PPIs as non-secure Group-1 */
	for (i = 0; i < gic_data.ppi_nr + 16; i += 32)
		writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);

	gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp);

	/* initialise system registers */
	gic_cpu_sys_reg_init();
}
1065
1066#ifdef CONFIG_SMP
1067
1068#define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
1069#define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL)
1070
/* CPU hotplug "starting" callback: bring up GIC (and ITS) on this CPU. */
static int gic_starting_cpu(unsigned int cpu)
{
	gic_cpu_init();

	if (gic_dist_supports_lpis())
		its_cpu_init();

	return 0;
}
1080
/*
 * Build the 16-bit SGI target list (one bit per Aff0 value) for the
 * CPUs in @mask that share @cluster_id, starting at *base_cpu.
 * On return, *base_cpu is the last CPU consumed, so the caller's
 * for_each_cpu() continues with the next cluster.
 */
static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
				   unsigned long cluster_id)
{
	int next_cpu, cpu = *base_cpu;
	unsigned long mpidr = cpu_logical_map(cpu);
	u16 tlist = 0;

	while (cpu < nr_cpu_ids) {
		tlist |= 1 << (mpidr & 0xf);

		next_cpu = cpumask_next(cpu, mask);
		if (next_cpu >= nr_cpu_ids)
			goto out;
		cpu = next_cpu;

		mpidr = cpu_logical_map(cpu);

		if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
			/* Next CPU is in another cluster: back up and stop. */
			cpu--;
			goto out;
		}
	}
out:
	*base_cpu = cpu;
	return tlist;
}
1107
1108#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
1109 (MPIDR_AFFINITY_LEVEL(cluster_id, level) \
1110 << ICC_SGI1R_AFFINITY_## level ##_SHIFT)
1111
/*
 * Fire SGI @irq at the CPUs named by (@cluster_id, @tlist) through a
 * single ICC_SGI1R_EL1 write.
 */
static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
	u64 val;

	val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)	|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 2)	|
	       irq << ICC_SGI1R_SGI_ID_SHIFT		|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 1)	|
	       MPIDR_TO_SGI_RS(cluster_id)		|
	       tlist << ICC_SGI1R_TARGET_LIST_SHIFT);

	pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
	gic_write_sgi1r(val);
}
1126
/*
 * irq_chip::ipi_send_mask callback: deliver SGI d->hwirq to every CPU
 * in @mask, batching CPUs cluster by cluster.
 */
static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
	int cpu;

	if (WARN_ON(d->hwirq >= 16))
		return;

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	wmb();

	for_each_cpu(cpu, mask) {
		u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
		u16 tlist;

		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
		gic_send_sgi(cluster_id, tlist, d->hwirq);
	}

	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
	isb();
}
1151
/*
 * SMP bring-up: register the per-CPU hotplug callback and hand the
 * eight SGIs to the core IPI infrastructure.
 */
static void __init gic_smp_init(void)
{
	struct irq_fwspec sgi_fwspec = {
		.fwnode		= gic_data.fwnode,
		.param_count	= 1,
	};
	int base_sgi;

	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
				  "irqchip/arm/gicv3:starting",
				  gic_starting_cpu, NULL);

	/* Register all 8 non-secure SGIs */
	base_sgi = __irq_domain_alloc_irqs(gic_data.domain, -1, 8,
					   NUMA_NO_NODE, &sgi_fwspec,
					   false, NULL);
	if (WARN_ON(base_sgi <= 0))
		return;

	set_smp_ipi_range(base_sgi, 8);
}
1173
/*
 * irq_chip::irq_set_affinity callback: route an (E)SPI to a single CPU
 * via GICD_IROUTER. Per-CPU interrupts (SGI/PPI) cannot be re-routed.
 * The interrupt is masked around the route change to avoid delivery
 * to a stale target.
 */
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	u32 offset, index;
	void __iomem *reg;
	int enabled;
	u64 val;

	if (force)
		cpu = cpumask_first(mask_val);
	else
		cpu = cpumask_any_and(mask_val, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	if (gic_irq_in_rdist(d))
		return -EINVAL;

	/* If interrupt was enabled, disable it first */
	enabled = gic_peek_irq(d, GICD_ISENABLER);
	if (enabled)
		gic_mask_irq(d);

	offset = convert_offset_index(d, GICD_IROUTER, &index);
	reg = gic_dist_base(d) + offset + (index * 8);
	val = gic_mpidr_to_affinity(cpu_logical_map(cpu));

	gic_write_irouter(val, reg);

	/*
	 * If the interrupt was enabled, enabled it again. Otherwise,
	 * just wait for the distributor to have digested our changes.
	 */
	if (enabled)
		gic_unmask_irq(d);
	else
		gic_dist_wait_for_rwp();

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
1218#else
1219#define gic_set_affinity NULL
1220#define gic_ipi_send_mask NULL
1221#define gic_smp_init() do { } while(0)
1222#endif
1223
/* Re-trigger by forcing the interrupt pending; non-zero on success. */
static int gic_retrigger(struct irq_data *data)
{
	return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
}
1228
1229#ifdef CONFIG_CPU_PM
1230static int gic_cpu_pm_notifier(struct notifier_block *self,
1231 unsigned long cmd, void *v)
1232{
1233 if (cmd == CPU_PM_EXIT) {
1234 if (gic_dist_security_disabled())
1235 gic_enable_redist(true);
1236 gic_cpu_sys_reg_init();
1237 } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
1238 gic_write_grpen1(0);
1239 gic_enable_redist(false);
1240 }
1241 return NOTIFY_OK;
1242}
1243
/* Notifier used to restore GIC CPU state across CPU power transitions. */
static struct notifier_block gic_cpu_pm_notifier_block = {
	.notifier_call = gic_cpu_pm_notifier,
};
1247
/* Hook the GIC into the CPU PM notification chain. */
static void gic_cpu_pm_init(void)
{
	cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
}
1252
1253#else
static inline void gic_cpu_pm_init(void) { }	/* !CONFIG_CPU_PM stub */
1255#endif
1256
/* irq_chip used in EOImode 0 (a single EOI drops priority and deactivates). */
static struct irq_chip gic_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_retrigger          = gic_retrigger,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_nmi_setup		= gic_irq_nmi_setup,
	.irq_nmi_teardown	= gic_irq_nmi_teardown,
	.ipi_send_mask		= gic_ipi_send_mask,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};
1274
/*
 * irq_chip used in EOImode 1 (EOI only drops priority; deactivation is
 * explicit), which also enables forwarding interrupts to vCPUs.
 */
static struct irq_chip gic_eoimode1_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_eoimode1_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoimode1_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_retrigger          = gic_retrigger,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
	.irq_nmi_setup		= gic_irq_nmi_setup,
	.irq_nmi_teardown	= gic_irq_nmi_teardown,
	.ipi_send_mask		= gic_ipi_send_mask,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};
1293
/*
 * Bind a virq to a hwirq: pick the proper chip for the current EOI
 * mode, the flow handler for the INTID range (per-CPU for SGI/PPI,
 * fasteoi for SPI/LPI), and mark SPIs as single-target.
 */
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct irq_chip *chip = &gic_chip;
	struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));

	if (static_branch_likely(&supports_deactivate_key))
		chip = &gic_eoimode1_chip;

	switch (__get_intid_range(hw)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case EPPI_RANGE:
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		break;

	case SPI_RANGE:
	case ESPI_RANGE:
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irq_set_probe(irq);
		irqd_set_single_target(irqd);
		break;

	case LPI_RANGE:
		if (!gic_dist_supports_lpis())
			return -EPERM;
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		break;

	default:
		return -EPERM;
	}

	/* Prevents SW retriggers which mess up the ACK/EOI ordering */
	irqd_set_handle_enforce_irqctx(irqd);
	return 0;
}
1335
/*
 * Decode a firmware interrupt specifier into (hwirq, trigger type).
 * Three encodings are accepted: the internal 1-cell SGI form, the DT
 * 3-cell form (type, number, flags — with types for SPI/PPI/ESPI/EPPI/
 * LPI/partitioned-PPI), and the 2-cell form used by ACPI fwnodes.
 */
static int gic_irq_domain_translate(struct irq_domain *d,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
		*hwirq = fwspec->param[0];
		*type = IRQ_TYPE_EDGE_RISING;
		return 0;
	}

	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count < 3)
			return -EINVAL;

		switch (fwspec->param[0]) {
		case 0:			/* SPI */
			*hwirq = fwspec->param[1] + 32;
			break;
		case 1:			/* PPI */
			*hwirq = fwspec->param[1] + 16;
			break;
		case 2:			/* ESPI */
			*hwirq = fwspec->param[1] + ESPI_BASE_INTID;
			break;
		case 3:			/* EPPI */
			*hwirq = fwspec->param[1] + EPPI_BASE_INTID;
			break;
		case GIC_IRQ_TYPE_LPI:	/* LPI */
			*hwirq = fwspec->param[1];
			break;
		case GIC_IRQ_TYPE_PARTITION:
			*hwirq = fwspec->param[1];
			if (fwspec->param[1] >= 16)
				*hwirq += EPPI_BASE_INTID - 16;
			else
				*hwirq += 16;
			break;
		default:
			return -EINVAL;
		}

		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

		/*
		 * Make it clear that broken DTs are... broken.
		 * Partitioned PPIs are an unfortunate exception.
		 */
		WARN_ON(*type == IRQ_TYPE_NONE &&
			fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
		return 0;
	}

	if (is_fwnode_irqchip(fwspec->fwnode)) {
		if(fwspec->param_count != 2)
			return -EINVAL;

		*hwirq = fwspec->param[0];
		*type = fwspec->param[1];

		WARN_ON(*type == IRQ_TYPE_NONE);
		return 0;
	}

	return -EINVAL;
}
1402
/*
 * irq_domain::alloc callback: translate the specifier once, then map
 * nr_irqs consecutive hwirqs starting there.
 */
static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;

	ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}
1423
1424static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
1425 unsigned int nr_irqs)
1426{
1427 int i;
1428
1429 for (i = 0; i < nr_irqs; i++) {
1430 struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
1431 irq_set_handler(virq + i, NULL);
1432 irq_domain_reset_irq_data(d);
1433 }
1434}
1435
/*
 * Decide whether this domain should service the given fwspec.
 * Returns non-zero to claim the specifier.
 */
static int gic_irq_domain_select(struct irq_domain *d,
				 struct irq_fwspec *fwspec,
				 enum irq_domain_bus_token bus_token)
{
	/* Not for us if the firmware node doesn't match. */
	if (fwspec->fwnode != d->fwnode)
		return 0;

	/* If this is not DT, then we have a single domain. */
	if (!is_of_node(fwspec->fwnode))
		return 1;

	/*
	 * A 4-cell (or larger) PPI specifier (param[0] == 1) with a
	 * non-zero 4th cell (a partition phandle) belongs to the matching
	 * partition domain rather than to the main GIC domain.
	 * NOTE(review): param[1] indexes ppi_descs without a bounds
	 * check; a malformed DT could cause an out-of-range read -
	 * confirm against current upstream handling.
	 */
	if (fwspec->param_count >= 4 &&
	    fwspec->param[0] == 1 && fwspec->param[3] != 0 &&
	    gic_data.ppi_descs)
		return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]);

	return d == gic_data.domain;
}
1459
/* irq_domain callbacks for the main (non-partitioned) GIC domain. */
static const struct irq_domain_ops gic_irq_domain_ops = {
	.translate = gic_irq_domain_translate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.select = gic_irq_domain_select,
};
1466
1467static int partition_domain_translate(struct irq_domain *d,
1468 struct irq_fwspec *fwspec,
1469 unsigned long *hwirq,
1470 unsigned int *type)
1471{
1472 struct device_node *np;
1473 int ret;
1474
1475 if (!gic_data.ppi_descs)
1476 return -ENOMEM;
1477
1478 np = of_find_node_by_phandle(fwspec->param[3]);
1479 if (WARN_ON(!np))
1480 return -EINVAL;
1481
1482 ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]],
1483 of_node_to_fwnode(np));
1484 if (ret < 0)
1485 return ret;
1486
1487 *hwirq = ret;
1488 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
1489
1490 return 0;
1491}
1492
/* irq_domain callbacks for the per-PPI partition domains. */
static const struct irq_domain_ops partition_domain_ops = {
	.translate = partition_domain_translate,
	.select = gic_irq_domain_select,
};
1497
1498static bool gic_enable_quirk_msm8996(void *data)
1499{
1500 struct gic_chip_data *d = data;
1501
1502 d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996;
1503
1504 return true;
1505}
1506
1507static bool gic_enable_quirk_cavium_38539(void *data)
1508{
1509 struct gic_chip_data *d = data;
1510
1511 d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539;
1512
1513 return true;
1514}
1515
1516static bool gic_enable_quirk_hip06_07(void *data)
1517{
1518 struct gic_chip_data *d = data;
1519
1520
1521
1522
1523
1524
1525
1526
1527 if (d->rdists.gicd_typer & GICD_TYPER_ESPI) {
1528
1529 d->rdists.gicd_typer &= ~GENMASK(9, 8);
1530 return true;
1531 }
1532
1533 return false;
1534}
1535
/*
 * Implementation quirks, matched either by DT compatible string or by
 * the (GICD_IIDR & mask) value.  The table is terminated by an empty
 * entry.
 */
static const struct gic_quirk gic_quirks[] = {
	{
		.desc = "GICv3: Qualcomm MSM8996 broken firmware",
		.compatible = "qcom,msm8996-gic-v3",
		.init = gic_enable_quirk_msm8996,
	},
	{
		.desc = "GICv3: HIP06 erratum 161010803",
		.iidr = 0x0204043b,
		.mask = 0xffffffff,
		.init = gic_enable_quirk_hip06_07,
	},
	{
		.desc = "GICv3: HIP07 erratum 161010803",
		.iidr = 0x00000000,
		.mask = 0xffffffff,
		.init = gic_enable_quirk_hip06_07,
	},
	{
		/*
		 * Sets FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539, which makes
		 * gic_init_bases() skip the GICD_TYPER2 access the
		 * affected implementation cannot tolerate.
		 * NOTE(review): the iidr/mask pair is meant to cover the
		 * whole range of affected Cavium/Marvell parts - confirm
		 * against the erratum documentation.
		 */
		.desc = "GICv3: Cavium erratum 38539",
		.iidr = 0xa000034c,
		.mask = 0xe8f00fff,
		.init = gic_enable_quirk_cavium_38539,
	},
	{
	}
};
1570
/*
 * Enable pseudo-NMI support, provided the kernel booted with interrupt
 * priority masking enabled (gic_prio_masking_enabled()).
 */
static void gic_enable_nmi_support(void)
{
	int i;

	if (!gic_prio_masking_enabled())
		return;

	/* One refcount per PPI, tracking how many users requested NMI mode. */
	ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL);
	if (!ppi_nmi_refs)
		return;

	for (i = 0; i < gic_data.ppi_nr; i++)
		refcount_set(&ppi_nmi_refs[i], 0);

	/*
	 * ICC_CTLR_EL1.PMHE being set means PMR affects interrupt
	 * forwarding, so PMR writes need stronger synchronisation: flip
	 * the gic_pmr_sync static key ("forced" in the message below).
	 * NOTE(review): the bit cannot simply be cleared here if it is
	 * mandated by higher exception levels - confirm in the GICv3 spec.
	 */
	if (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK)
		static_branch_enable(&gic_pmr_sync);

	pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
		static_branch_unlikely(&gic_pmr_sync) ? "forced" : "relaxed");

	/*
	 * With Group 0 available and distributor security NOT disabled,
	 * priorities written by the (non-secure) kernel are presented
	 * shifted by the hardware; enable gic_nonsecure_priorities so
	 * the rest of the driver compensates when programming priority
	 * values.
	 * NOTE(review): the full single- vs two-security-state priority
	 * view rationale lives in the GICv3 architecture spec - confirm
	 * there.
	 */
	if (gic_has_group0() && !gic_dist_security_disabled())
		static_branch_enable(&gic_nonsecure_priorities);

	static_branch_enable(&supports_pseudo_nmis);

	/* Advertise NMI capability on whichever irq_chip flavour is live. */
	if (static_branch_likely(&supports_deactivate_key))
		gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI;
	else
		gic_chip.flags |= IRQCHIP_SUPPORTS_NMI;
}
1633
/*
 * Common (DT and ACPI) GICv3 initialisation: record the register bases,
 * apply quirks, create the irq domain, bring up the distributor, boot
 * CPU and SMP/PM hooks, then initialise ITS or v2m MSI support.
 *
 * Returns 0 on success or a negative errno; on failure the domain and
 * percpu rdist data are released.
 */
static int __init gic_init_bases(void __iomem *dist_base,
				 struct redist_region *rdist_regs,
				 u32 nr_redist_regions,
				 u64 redist_stride,
				 struct fwnode_handle *handle)
{
	u32 typer;
	int err;

	/* EOImode 1 (split EOI/deactivate) needs EL2; fall back otherwise. */
	if (!is_hyp_mode_available())
		static_branch_disable(&supports_deactivate_key);

	if (static_branch_likely(&supports_deactivate_key))
		pr_info("GIC: Using split EOI/Deactivate mode\n");

	gic_data.fwnode = handle;
	gic_data.dist_base = dist_base;
	gic_data.redist_regions = rdist_regs;
	gic_data.nr_redist_regions = nr_redist_regions;
	gic_data.redist_stride = redist_stride;

	/*
	 * Cache GICD_TYPER: the GIC_LINE_NR/GIC_ESPI_NR macros and the
	 * quirk handlers all work from this cached copy.
	 */
	typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
	gic_data.rdists.gicd_typer = typer;

	gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR),
			  gic_quirks, &gic_data);

	pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
	pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);

	/*
	 * Cavium erratum 38539: reading GICD_TYPER2 faults on affected
	 * parts, so leave the cached value at zero when the quirk fired.
	 */
	if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539))
		gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);

	gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
						 &gic_data);
	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
	/* Assume full GICv4.1 features; gic_update_rdist_properties() trims. */
	gic_data.rdists.has_rvpeid = true;
	gic_data.rdists.has_vlpis = true;
	gic_data.rdists.has_direct_lpi = true;
	gic_data.rdists.has_vpend_valid_dirty = true;

	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
		err = -ENOMEM;
		goto out_free;
	}

	irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);

	gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
	pr_info("Distributor has %sRange Selector support\n",
		gic_data.has_rss ? "" : "no ");

	/* Message-based interrupts, if the distributor supports them. */
	if (typer & GICD_TYPER_MBIS) {
		err = mbi_init(handle, gic_data.domain);
		if (err)
			pr_err("Failed to initialize MBIs\n");
	}

	set_handle_irq(gic_handle_irq);

	gic_update_rdist_properties();

	gic_dist_init();
	gic_cpu_init();
	gic_smp_init();
	gic_cpu_pm_init();

	/* LPIs imply an ITS; otherwise fall back to GICv2m MSI if built. */
	if (gic_dist_supports_lpis()) {
		its_init(handle, &gic_data.rdists, gic_data.domain);
		its_cpu_init();
	} else {
		if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
			gicv2m_init(handle, gic_data.domain);
	}

	gic_enable_nmi_support();

	return 0;

out_free:
	if (gic_data.domain)
		irq_domain_remove(gic_data.domain);
	free_percpu(gic_data.rdists.rdist);
	return err;
}
1726
1727static int __init gic_validate_dist_version(void __iomem *dist_base)
1728{
1729 u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
1730
1731 if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
1732 return -ENODEV;
1733
1734 return 0;
1735}
1736
1737
1738static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
1739{
1740 struct device_node *parts_node, *child_part;
1741 int part_idx = 0, i;
1742 int nr_parts;
1743 struct partition_affinity *parts;
1744
1745 parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
1746 if (!parts_node)
1747 return;
1748
1749 gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
1750 if (!gic_data.ppi_descs)
1751 return;
1752
1753 nr_parts = of_get_child_count(parts_node);
1754
1755 if (!nr_parts)
1756 goto out_put_node;
1757
1758 parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
1759 if (WARN_ON(!parts))
1760 goto out_put_node;
1761
1762 for_each_child_of_node(parts_node, child_part) {
1763 struct partition_affinity *part;
1764 int n;
1765
1766 part = &parts[part_idx];
1767
1768 part->partition_id = of_node_to_fwnode(child_part);
1769
1770 pr_info("GIC: PPI partition %pOFn[%d] { ",
1771 child_part, part_idx);
1772
1773 n = of_property_count_elems_of_size(child_part, "affinity",
1774 sizeof(u32));
1775 WARN_ON(n <= 0);
1776
1777 for (i = 0; i < n; i++) {
1778 int err, cpu;
1779 u32 cpu_phandle;
1780 struct device_node *cpu_node;
1781
1782 err = of_property_read_u32_index(child_part, "affinity",
1783 i, &cpu_phandle);
1784 if (WARN_ON(err))
1785 continue;
1786
1787 cpu_node = of_find_node_by_phandle(cpu_phandle);
1788 if (WARN_ON(!cpu_node))
1789 continue;
1790
1791 cpu = of_cpu_node_to_id(cpu_node);
1792 if (WARN_ON(cpu < 0))
1793 continue;
1794
1795 pr_cont("%pOF[%d] ", cpu_node, cpu);
1796
1797 cpumask_set_cpu(cpu, &part->mask);
1798 }
1799
1800 pr_cont("}\n");
1801 part_idx++;
1802 }
1803
1804 for (i = 0; i < gic_data.ppi_nr; i++) {
1805 unsigned int irq;
1806 struct partition_desc *desc;
1807 struct irq_fwspec ppi_fwspec = {
1808 .fwnode = gic_data.fwnode,
1809 .param_count = 3,
1810 .param = {
1811 [0] = GIC_IRQ_TYPE_PARTITION,
1812 [1] = i,
1813 [2] = IRQ_TYPE_NONE,
1814 },
1815 };
1816
1817 irq = irq_create_fwspec_mapping(&ppi_fwspec);
1818 if (WARN_ON(!irq))
1819 continue;
1820 desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
1821 irq, &partition_domain_ops);
1822 if (WARN_ON(!desc))
1823 continue;
1824
1825 gic_data.ppi_descs[i] = desc;
1826 }
1827
1828out_put_node:
1829 of_node_put(parts_node);
1830}
1831
/*
 * Fill gic_v3_kvm_info from DT: maintenance interrupt, GICV region
 * (for GICv2 guest compatibility), and GICv4/v4.1 capabilities.
 */
static void __init gic_of_setup_kvm_info(struct device_node *node)
{
	int ret;
	struct resource r;
	u32 gicv_idx;

	gic_v3_kvm_info.type = GIC_V3;

	gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
	if (!gic_v3_kvm_info.maint_irq)
		return;

	if (of_property_read_u32(node, "#redistributor-regions",
				 &gicv_idx))
		gicv_idx = 1;

	/*
	 * reg entries: GICD, then nr_redist_regions x GICR, then GICC,
	 * GICH, GICV - so GICV sits at index nr_redist_regions + 3.
	 * NOTE(review): layout inferred from the "+= 3" and the vcpu
	 * assignment below - confirm against the gic-v3 DT binding.
	 */
	gicv_idx += 3;
	ret = of_address_to_resource(node, gicv_idx, &r);
	if (!ret)
		gic_v3_kvm_info.vcpu = r;

	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
	gic_set_kvm_info(&gic_v3_kvm_info);
}
1857
/*
 * DT probe entry point: map the distributor and redistributor regions,
 * apply DT-matched quirks, then hand off to gic_init_bases().
 * On failure, every mapping made so far is torn down.
 */
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *dist_base;
	struct redist_region *rdist_regs;
	u64 redist_stride;
	u32 nr_redist_regions;
	int err, i;

	dist_base = of_iomap(node, 0);
	if (!dist_base) {
		pr_err("%pOF: unable to map gic dist registers\n", node);
		return -ENXIO;
	}

	err = gic_validate_dist_version(dist_base);
	if (err) {
		pr_err("%pOF: no distributor detected, giving up\n", node);
		goto out_unmap_dist;
	}

	/* One redistributor region unless the DT says otherwise. */
	if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
		nr_redist_regions = 1;

	rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs),
			     GFP_KERNEL);
	if (!rdist_regs) {
		err = -ENOMEM;
		goto out_unmap_dist;
	}

	/* Map each redistributor region (reg index 0 is the distributor). */
	for (i = 0; i < nr_redist_regions; i++) {
		struct resource res;
		int ret;

		ret = of_address_to_resource(node, 1 + i, &res);
		rdist_regs[i].redist_base = of_iomap(node, 1 + i);
		if (ret || !rdist_regs[i].redist_base) {
			pr_err("%pOF: couldn't map region %d\n", node, i);
			err = -ENODEV;
			goto out_unmap_rdist;
		}
		rdist_regs[i].phys_base = res.start;
	}

	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
		redist_stride = 0;

	gic_enable_of_quirks(node, gic_quirks, &gic_data);

	err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
			     redist_stride, &node->fwnode);
	if (err)
		goto out_unmap_rdist;

	gic_populate_ppi_partitions(node);

	if (static_branch_likely(&supports_deactivate_key))
		gic_of_setup_kvm_info(node);
	return 0;

out_unmap_rdist:
	for (i = 0; i < nr_redist_regions; i++)
		if (rdist_regs[i].redist_base)
			iounmap(rdist_regs[i].redist_base);
	kfree(rdist_regs);
out_unmap_dist:
	iounmap(dist_base);
	return err;
}
1927
1928IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
1929
1930#ifdef CONFIG_ACPI
/* Scratch state accumulated while parsing the ACPI MADT; __initdata. */
static struct
{
	void __iomem *dist_base;	/* mapped GICD registers */
	struct redist_region *redist_regs;
	u32 nr_redist_regions;
	bool single_redist;		/* GICR described per-CPU in GICC entries */
	int enabled_rdists;		/* enabled GICCs carrying a GICR base */
	u32 maint_irq;			/* virtualisation maintenance GSI */
	int maint_irq_mode;		/* ACPI_EDGE/LEVEL_SENSITIVE */
	phys_addr_t vcpu_base;		/* GICV base for GICv2 guest compat */
} acpi_data __initdata;
1942
1943static void __init
1944gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
1945{
1946 static int count = 0;
1947
1948 acpi_data.redist_regs[count].phys_base = phys_base;
1949 acpi_data.redist_regs[count].redist_base = redist_base;
1950 acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
1951 count++;
1952}
1953
1954static int __init
1955gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
1956 const unsigned long end)
1957{
1958 struct acpi_madt_generic_redistributor *redist =
1959 (struct acpi_madt_generic_redistributor *)header;
1960 void __iomem *redist_base;
1961
1962 redist_base = ioremap(redist->base_address, redist->length);
1963 if (!redist_base) {
1964 pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
1965 return -ENOMEM;
1966 }
1967
1968 gic_acpi_register_redist(redist->base_address, redist_base);
1969 return 0;
1970}
1971
/*
 * MADT walker for GICC subtables when redistributors are described
 * per-CPU: map a fixed-size window at each enabled CPU's GICR base.
 * Window size depends on the architecture revision reported by the
 * distributor (4 x 64K frames for GICv4, 2 x 64K for GICv3).
 */
static int __init
gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
			 const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
			(struct acpi_madt_generic_interrupt *)header;
	u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
	u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
	void __iomem *redist_base;

	/* Disabled CPUs contribute no redistributor. */
	if (!(gicc->flags & ACPI_MADT_ENABLED))
		return 0;

	redist_base = ioremap(gicc->gicr_base_address, size);
	if (!redist_base)
		return -ENOMEM;

	gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
	return 0;
}
1993
1994static int __init gic_acpi_collect_gicr_base(void)
1995{
1996 acpi_tbl_entry_handler redist_parser;
1997 enum acpi_madt_type type;
1998
1999 if (acpi_data.single_redist) {
2000 type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
2001 redist_parser = gic_acpi_parse_madt_gicc;
2002 } else {
2003 type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
2004 redist_parser = gic_acpi_parse_madt_redist;
2005 }
2006
2007
2008 if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
2009 return 0;
2010
2011 pr_info("No valid GICR entries exist\n");
2012 return -ENODEV;
2013}
2014
/*
 * Counting-only MADT callback: accept every GICR subtable so
 * acpi_table_parse_madt() returns how many there are.
 */
static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	return 0;
}
2021
/*
 * Counting-only MADT callback for GICC subtables: tally enabled CPUs
 * that carry a redistributor base. Returning -ENODEV for an enabled
 * GICC without one invalidates the whole count.
 */
static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
				(struct acpi_madt_generic_interrupt *)header;

	/*
	 * An enabled GICC with a valid GICR base address means the
	 * redistributors are described per-CPU.
	 */
	if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
		acpi_data.enabled_rdists++;
		return 0;
	}

	/*
	 * Firmware may legitimately list disabled (e.g. hot-pluggable)
	 * CPUs; skip those entries rather than failing the probe.
	 */
	if (!(gicc->flags & ACPI_MADT_ENABLED))
		return 0;

	return -ENODEV;
}
2046
/*
 * Determine how many redistributor regions the MADT describes and in
 * which style: dedicated GICR subtables take precedence; otherwise
 * fall back to per-CPU GICR bases in the GICC subtables
 * (acpi_data.single_redist = true).
 *
 * Returns the region count, or <= 0 if none were found / on error.
 */
static int __init gic_acpi_count_gicr_regions(void)
{
	int count;

	/* Prefer dedicated GICR subtables when present. */
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
				      gic_acpi_match_gicr, 0);
	if (count > 0) {
		acpi_data.single_redist = false;
		return count;
	}

	/* Otherwise, each enabled GICC with a GICR base is one region. */
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_match_gicc, 0);
	if (count > 0) {
		acpi_data.single_redist = true;
		count = acpi_data.enabled_rdists;
	}

	return count;
}
2072
2073static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
2074 struct acpi_probe_entry *ape)
2075{
2076 struct acpi_madt_generic_distributor *dist;
2077 int count;
2078
2079 dist = (struct acpi_madt_generic_distributor *)header;
2080 if (dist->version != ape->driver_data)
2081 return false;
2082
2083
2084 count = gic_acpi_count_gicr_regions();
2085 if (count <= 0)
2086 return false;
2087
2088 acpi_data.nr_redist_regions = count;
2089 return true;
2090}
2091
/*
 * MADT walker collecting virtualisation info from GICC subtables:
 * records the maintenance interrupt, its trigger mode and the GICV
 * base from the first enabled entry, then verifies every subsequent
 * enabled entry agrees (heterogeneous values cannot be supported).
 */
static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header,
						const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
		(struct acpi_madt_generic_interrupt *)header;
	int maint_irq_mode;
	static int first_madt = true;

	/* Disabled CPUs carry no usable virtualisation info. */
	if (!(gicc->flags & ACPI_MADT_ENABLED))
		return 0;

	maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
		ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;

	if (first_madt) {
		first_madt = false;

		acpi_data.maint_irq = gicc->vgic_interrupt;
		acpi_data.maint_irq_mode = maint_irq_mode;
		acpi_data.vcpu_base = gicc->gicv_base_address;

		return 0;
	}

	/*
	 * All CPU interfaces must expose identical maintenance-irq and
	 * GICV parameters for KVM to use them.
	 */
	if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
	    (acpi_data.maint_irq_mode != maint_irq_mode) ||
	    (acpi_data.vcpu_base != gicc->gicv_base_address))
		return -EINVAL;

	return 0;
}
2127
2128static bool __init gic_acpi_collect_virt_info(void)
2129{
2130 int count;
2131
2132 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
2133 gic_acpi_parse_virt_madt_gicc, 0);
2134
2135 return (count > 0);
2136}
2137
/* Fixed mapping sizes used on the ACPI path (no region lengths in MADT). */
#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
#define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K)
#define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K)
2141
/*
 * ACPI counterpart of gic_of_setup_kvm_info(): publish maintenance
 * interrupt, GICV resource and GICv4/v4.1 capabilities to KVM.
 */
static void __init gic_acpi_setup_kvm_info(void)
{
	int irq;

	if (!gic_acpi_collect_virt_info()) {
		pr_warn("Unable to get hardware information used for virtualization\n");
		return;
	}

	gic_v3_kvm_info.type = GIC_V3;

	irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
				acpi_data.maint_irq_mode,
				ACPI_ACTIVE_HIGH);
	if (irq <= 0)
		return;

	gic_v3_kvm_info.maint_irq = irq;

	/* GICV base is optional; only publish it when firmware supplied one. */
	if (acpi_data.vcpu_base) {
		struct resource *vcpu = &gic_v3_kvm_info.vcpu;

		vcpu->flags = IORESOURCE_MEM;
		vcpu->start = acpi_data.vcpu_base;
		vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
	}

	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
	gic_set_kvm_info(&gic_v3_kvm_info);
}
2173
/*
 * ACPI probe entry point (invoked for the MADT GICD subtable): map the
 * distributor, collect redistributor regions, allocate a firmware node
 * for the domain and hand off to gic_init_bases().
 * On failure, unwinds every mapping/allocation made so far.
 */
static int __init
gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
{
	struct acpi_madt_generic_distributor *dist;
	struct fwnode_handle *domain_handle;
	size_t size;
	int i, err;

	/* The MADT carries no GICD length; map a fixed 64K window. */
	dist = (struct acpi_madt_generic_distributor *)header;
	acpi_data.dist_base = ioremap(dist->base_address,
				      ACPI_GICV3_DIST_MEM_SIZE);
	if (!acpi_data.dist_base) {
		pr_err("Unable to map GICD registers\n");
		return -ENOMEM;
	}

	err = gic_validate_dist_version(acpi_data.dist_base);
	if (err) {
		pr_err("No distributor detected at @%p, giving up\n",
		       acpi_data.dist_base);
		goto out_dist_unmap;
	}

	/* nr_redist_regions was cached by acpi_validate_gic_table(). */
	size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
	acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
	if (!acpi_data.redist_regs) {
		err = -ENOMEM;
		goto out_dist_unmap;
	}

	err = gic_acpi_collect_gicr_base();
	if (err)
		goto out_redist_unmap;

	domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
	if (!domain_handle) {
		err = -ENOMEM;
		goto out_redist_unmap;
	}

	err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
			     acpi_data.nr_redist_regions, 0, domain_handle);
	if (err)
		goto out_fwhandle_free;

	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);

	if (static_branch_likely(&supports_deactivate_key))
		gic_acpi_setup_kvm_info();

	return 0;

out_fwhandle_free:
	irq_domain_free_fwnode(domain_handle);
out_redist_unmap:
	for (i = 0; i < acpi_data.nr_redist_regions; i++)
		if (acpi_data.redist_regs[i].redist_base)
			iounmap(acpi_data.redist_regs[i].redist_base);
	kfree(acpi_data.redist_regs);
out_dist_unmap:
	iounmap(acpi_data.dist_base);
	return err;
}
/* ACPI probes: match GICD subtables of version V3, V4, or unspecified. */
IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
		     gic_acpi_init);
2247#endif
2248