linux/drivers/irqchip/irq-gic-v3.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#define pr_fmt(fmt)     "GICv3: " fmt

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/refcount.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-common.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/irq-partition-percpu.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"

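/*
 * Pseudo-NMI priority (informative note): clearing bit 7 of the default
 * priority yields a numerically lower, i.e. more urgent, value (0x20,
 * assuming the usual GICD_INT_DEF_PRI of 0xa0).
 */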
#define GICD_INT_NMI_PRI        (GICD_INT_DEF_PRI & ~0x80)

#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996     (1ULL << 0)

struct redist_region {
        void __iomem            *redist_base;
        phys_addr_t             phys_base;
        bool                    single_redist;
};

struct gic_chip_data {
        struct fwnode_handle    *fwnode;
        void __iomem            *dist_base;
        struct redist_region    *redist_regions;
        struct rdists           rdists;
        struct irq_domain       *domain;
        u64                     redist_stride;
        u32                     nr_redist_regions;
        u64                     flags;
        bool                    has_rss;
        unsigned int            irq_nr;
        struct partition_desc   *ppi_descs[16];
};

static struct gic_chip_data gic_data __read_mostly;
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);

/*
 * The behaviours of RPR and PMR registers differ depending on the value of
 * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
 * distributor and redistributors depends on whether security is enabled in the
 * GIC.
 *
 * When security is enabled, non-secure priority values from the (re)distributor
 * are presented to the GIC CPUIF as follows:
 *     (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
 *
 * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
 * EL1 are subject to a similar operation thus matching the priorities presented
 * from the (re)distributor when security is enabled.
 *
 * see GICv3/GICv4 Architecture Specification (IHI0069D):
 * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
 *   priorities.
 * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
 *   interrupt.
 *
 * For now, we only support pseudo-NMIs if we have a non-secure view of
 * priorities.
 */
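/*
 * Worked example of the shift above (informative): a non-secure write of
 * 0xd0 to GICD_IPRIORITYRn is presented to the CPU interface as
 * (0xd0 >> 1) | 0x80 == 0xe8, so all non-secure priorities end up in the
 * 0x80-0xff range as seen by the CPUIF.
 */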
static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);

/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
static refcount_t ppi_nmi_refs[16];

static struct gic_kvm_info gic_v3_kvm_info;
static DEFINE_PER_CPU(bool, has_rss);

#define MPIDR_RS(mpidr)                 (((mpidr) & 0xF0UL) >> 4)
#define gic_data_rdist()                (this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base()        (gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base()       (gic_data_rdist_rd_base() + SZ_64K)
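
/*
 * Each redistributor is a pair of 64K frames: RD_base (control registers)
 * immediately followed by SGI_base (SGI/PPI registers), hence the SZ_64K
 * offset above.
 */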

/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE       0xf0

static inline unsigned int gic_irq(struct irq_data *d)
{
        return d->hwirq;
}

static inline int gic_irq_in_rdist(struct irq_data *d)
{
        return gic_irq(d) < 32;
}

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
        if (gic_irq_in_rdist(d))        /* SGI+PPI -> SGI_base for this CPU */
                return gic_data_rdist_sgi_base();

        if (d->hwirq <= 1023)           /* SPI -> dist_base */
                return gic_data.dist_base;

        return NULL;
}

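/*
 * GICD_CTLR.RWP ("Register Write Pending") stays set while the effects of
 * certain distributor/redistributor writes are still propagating; poll it,
 * with a generous timeout, before relying on the change having taken effect.
 */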
static void gic_do_wait_for_rwp(void __iomem *base)
{
        u32 count = 1000000;    /* 1s! */

        while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
                count--;
                if (!count) {
                        pr_err_ratelimited("RWP timeout, gone fishing\n");
                        return;
                }
                cpu_relax();
                udelay(1);
        }
}

/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
        gic_do_wait_for_rwp(gic_data.dist_base);
}

/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
        gic_do_wait_for_rwp(gic_data_rdist_rd_base());
}

#ifdef CONFIG_ARM64

static u64 __maybe_unused gic_read_iar(void)
{
        if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
                return gic_read_iar_cavium_thunderx();
        else
                return gic_read_iar_common();
}
#endif

static void gic_enable_redist(bool enable)
{
        void __iomem *rbase;
        u32 count = 1000000;    /* 1s! */
        u32 val;

        if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
                return;

        rbase = gic_data_rdist_rd_base();

        val = readl_relaxed(rbase + GICR_WAKER);
        if (enable)
                /* Wake up this CPU redistributor */
                val &= ~GICR_WAKER_ProcessorSleep;
        else
                val |= GICR_WAKER_ProcessorSleep;
        writel_relaxed(val, rbase + GICR_WAKER);

        if (!enable) {          /* Check that GICR_WAKER is writeable */
                val = readl_relaxed(rbase + GICR_WAKER);
                if (!(val & GICR_WAKER_ProcessorSleep))
                        return; /* No PM support in this redistributor */
        }

        while (--count) {
                val = readl_relaxed(rbase + GICR_WAKER);
                if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
                        break;
                cpu_relax();
                udelay(1);
        }
        if (!count)
                pr_err_ratelimited("redistributor failed to %s...\n",
                                   enable ? "wakeup" : "sleep");
}

/*
 * Routines to disable, enable, EOI and route interrupts
 */
static int gic_peek_irq(struct irq_data *d, u32 offset)
{
        u32 mask = 1 << (gic_irq(d) % 32);
        void __iomem *base;

        if (gic_irq_in_rdist(d))
                base = gic_data_rdist_sgi_base();
        else
                base = gic_data.dist_base;

        return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
}

static void gic_poke_irq(struct irq_data *d, u32 offset)
{
        u32 mask = 1 << (gic_irq(d) % 32);
        void (*rwp_wait)(void);
        void __iomem *base;

        if (gic_irq_in_rdist(d)) {
                base = gic_data_rdist_sgi_base();
                rwp_wait = gic_redist_wait_for_rwp;
        } else {
                base = gic_data.dist_base;
                rwp_wait = gic_dist_wait_for_rwp;
        }

        writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
        rwp_wait();
}

static void gic_mask_irq(struct irq_data *d)
{
        gic_poke_irq(d, GICD_ICENABLER);
}

static void gic_eoimode1_mask_irq(struct irq_data *d)
{
        gic_mask_irq(d);
        /*
         * When masking a forwarded interrupt, make sure it is
         * deactivated as well.
         *
         * This ensures that an interrupt that is getting
         * disabled/masked will not get "stuck", because there is
         * no one to deactivate it (the guest is being terminated).
         */
        if (irqd_is_forwarded_to_vcpu(d))
                gic_poke_irq(d, GICD_ICACTIVER);
}

static void gic_unmask_irq(struct irq_data *d)
{
        gic_poke_irq(d, GICD_ISENABLER);
}

static inline bool gic_supports_nmi(void)
{
        return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
               static_branch_likely(&supports_pseudo_nmis);
}

static int gic_irq_set_irqchip_state(struct irq_data *d,
                                     enum irqchip_irq_state which, bool val)
{
        u32 reg;

        if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */
                return -EINVAL;

        switch (which) {
        case IRQCHIP_STATE_PENDING:
                reg = val ? GICD_ISPENDR : GICD_ICPENDR;
                break;

        case IRQCHIP_STATE_ACTIVE:
                reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
                break;

        case IRQCHIP_STATE_MASKED:
                reg = val ? GICD_ICENABLER : GICD_ISENABLER;
                break;

        default:
                return -EINVAL;
        }

        gic_poke_irq(d, reg);
        return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d,
                                     enum irqchip_irq_state which, bool *val)
{
        if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */
                return -EINVAL;

        switch (which) {
        case IRQCHIP_STATE_PENDING:
                *val = gic_peek_irq(d, GICD_ISPENDR);
                break;

        case IRQCHIP_STATE_ACTIVE:
                *val = gic_peek_irq(d, GICD_ISACTIVER);
                break;

        case IRQCHIP_STATE_MASKED:
                *val = !gic_peek_irq(d, GICD_ISENABLER);
                break;

        default:
                return -EINVAL;
        }

        return 0;
}

static void gic_irq_set_prio(struct irq_data *d, u8 prio)
{
        void __iomem *base = gic_dist_base(d);

        writeb_relaxed(prio, base + GICD_IPRIORITYR + gic_irq(d));
}

static int gic_irq_nmi_setup(struct irq_data *d)
{
        struct irq_desc *desc = irq_to_desc(d->irq);

        if (!gic_supports_nmi())
                return -EINVAL;

        if (gic_peek_irq(d, GICD_ISENABLER)) {
                pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
                return -EINVAL;
        }

        /*
         * A secondary irq_chip should be in charge of LPI requests;
         * it should not be possible to get here
         */
        if (WARN_ON(gic_irq(d) >= 8192))
                return -EINVAL;

        /* desc lock should already be held */
        if (gic_irq(d) < 32) {
                /* Setting up PPI as NMI, only switch handler for first NMI */
                if (!refcount_inc_not_zero(&ppi_nmi_refs[gic_irq(d) - 16])) {
                        refcount_set(&ppi_nmi_refs[gic_irq(d) - 16], 1);
                        desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
                }
        } else {
                desc->handle_irq = handle_fasteoi_nmi;
        }

        gic_irq_set_prio(d, GICD_INT_NMI_PRI);

        return 0;
}

static void gic_irq_nmi_teardown(struct irq_data *d)
{
        struct irq_desc *desc = irq_to_desc(d->irq);

        if (WARN_ON(!gic_supports_nmi()))
                return;

        if (gic_peek_irq(d, GICD_ISENABLER)) {
                pr_err("Cannot clear NMI property of enabled IRQ %u\n", d->irq);
                return;
        }

        /*
         * A secondary irq_chip should be in charge of LPI requests;
         * it should not be possible to get here
         */
        if (WARN_ON(gic_irq(d) >= 8192))
                return;

        /* desc lock should already be held */
        if (gic_irq(d) < 32) {
                /* Tearing down NMI, only switch handler for last NMI */
                if (refcount_dec_and_test(&ppi_nmi_refs[gic_irq(d) - 16]))
                        desc->handle_irq = handle_percpu_devid_irq;
        } else {
                desc->handle_irq = handle_fasteoi_irq;
        }

        gic_irq_set_prio(d, GICD_INT_DEF_PRI);
}

static void gic_eoi_irq(struct irq_data *d)
{
        gic_write_eoir(gic_irq(d));
}

static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
        /*
         * No need to deactivate an LPI, or an interrupt that
         * is getting forwarded to a vcpu.
         */
        if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
                return;
        gic_write_dir(gic_irq(d));
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
        unsigned int irq = gic_irq(d);
        void (*rwp_wait)(void);
        void __iomem *base;

        /* Interrupt configuration for SGIs can't be changed */
        if (irq < 16)
                return -EINVAL;

        /* SPIs have restrictions on the supported types */
        if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
                         type != IRQ_TYPE_EDGE_RISING)
                return -EINVAL;

        if (gic_irq_in_rdist(d)) {
                base = gic_data_rdist_sgi_base();
                rwp_wait = gic_redist_wait_for_rwp;
        } else {
                base = gic_data.dist_base;
                rwp_wait = gic_dist_wait_for_rwp;
        }

        return gic_configure_irq(irq, type, base, rwp_wait);
}

static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
        if (vcpu)
                irqd_set_forwarded_to_vcpu(d);
        else
                irqd_clr_forwarded_to_vcpu(d);
        return 0;
}

static u64 gic_mpidr_to_affinity(unsigned long mpidr)
{
        u64 aff;

        aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
               MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
               MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
               MPIDR_AFFINITY_LEVEL(mpidr, 0));

        return aff;
}
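
/*
 * Informative example: an MPIDR with Aff3.Aff2.Aff1.Aff0 = 0.1.2.3 packs
 * into 0x10203, matching the GICD_IROUTER layout (Aff3 in bits [39:32],
 * Aff2 in [23:16], Aff1 in [15:8], Aff0 in [7:0]).
 */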

static void gic_deactivate_unhandled(u32 irqnr)
{
        if (static_branch_likely(&supports_deactivate_key)) {
                if (irqnr < 8192)
                        gic_write_dir(irqnr);
        } else {
                gic_write_eoir(irqnr);
        }
}

static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
{
        bool irqs_enabled = interrupts_enabled(regs);
        int err;

        if (irqs_enabled)
                nmi_enter();

        if (static_branch_likely(&supports_deactivate_key))
                gic_write_eoir(irqnr);
        /*
         * Leave the PSR.I bit set to prevent other NMIs from being
         * received while handling this one.
         * PSR.I will be restored when we ERET to the
         * interrupted context.
         */
        err = handle_domain_nmi(gic_data.domain, irqnr, regs);
        if (err)
                gic_deactivate_unhandled(irqnr);

        if (irqs_enabled)
                nmi_exit();
}

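/*
 * INTID ranges dispatched below: 0-15 are SGIs, 16-31 PPIs, 32-1019 SPIs,
 * 1020-1023 are special (e.g. spurious), and 8192 and up are LPIs.
 */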
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
        u32 irqnr;

        irqnr = gic_read_iar();

        if (gic_supports_nmi() &&
            unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) {
                gic_handle_nmi(irqnr, regs);
                return;
        }

        if (gic_prio_masking_enabled()) {
                gic_pmr_mask_irqs();
                gic_arch_enable_irqs();
        }

        if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
                int err;

                if (static_branch_likely(&supports_deactivate_key))
                        gic_write_eoir(irqnr);
                else
                        isb();

                err = handle_domain_irq(gic_data.domain, irqnr, regs);
                if (err) {
                        WARN_ONCE(true, "Unexpected interrupt received!\n");
                        gic_deactivate_unhandled(irqnr);
                }
                return;
        }
        if (irqnr < 16) {
                gic_write_eoir(irqnr);
                if (static_branch_likely(&supports_deactivate_key))
                        gic_write_dir(irqnr);
#ifdef CONFIG_SMP
                /*
                 * Unlike GICv2, we don't need an smp_rmb() here.
                 * The control dependency from gic_read_iar to
                 * the ISB in gic_write_eoir is enough to ensure
                 * that any shared data read by handle_IPI will
                 * be read after the ACK.
                 */
                handle_IPI(irqnr, regs);
#else
                WARN_ONCE(true, "Unexpected SGI received!\n");
#endif
        }
}

static u32 gic_get_pribits(void)
{
        u32 pribits;

        pribits = gic_read_ctlr();
        pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
        pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
        pribits++;

        return pribits;
}
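
/*
 * ICC_CTLR_EL1.PRIbits encodes the number of implemented priority bits
 * minus one, hence the increment above: a field value of 7 means all 8
 * priority bits are implemented.
 */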

static bool gic_has_group0(void)
{
        u32 val;
        u32 old_pmr;

        old_pmr = gic_read_pmr();

        /*
         * Let's find out if Group0 is under control of EL3 or not by
         * setting the highest possible, non-zero priority in PMR.
         *
         * If SCR_EL3.FIQ is set, the priority gets shifted down in
         * order for the CPU interface to set bit 7, and keep the
         * actual priority in the non-secure range. In the process, it
         * loses the least significant bit and the actual priority
         * becomes 0x80. Reading it back returns 0, indicating that
         * we don't have access to Group0.
         */
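        /*
         * Informative example, assuming 8 implemented priority bits:
         * we write BIT(8 - 8) == 1 below. With Group0 under EL3
         * control, the stored value is (1 >> 1) | 0x80 == 0x80 and
         * reads back as 0; otherwise the 1 reads back unmodified.
         */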
        gic_write_pmr(BIT(8 - gic_get_pribits()));
        val = gic_read_pmr();

        gic_write_pmr(old_pmr);

        return val != 0;
}

static void __init gic_dist_init(void)
{
        unsigned int i;
        u64 affinity;
        void __iomem *base = gic_data.dist_base;

        /* Disable the distributor */
        writel_relaxed(0, base + GICD_CTLR);
        gic_dist_wait_for_rwp();

        /*
         * Configure SPIs as non-secure Group-1. This will only matter
         * if the GIC only has a single security state. This will not
         * do the right thing if the kernel is running in secure mode,
         * but that's not the intended use case anyway.
         */
        for (i = 32; i < gic_data.irq_nr; i += 32)
                writel_relaxed(~0, base + GICD_IGROUPR + i / 8);

        gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);

        /* Enable distributor with ARE, Group1 */
        writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
                       base + GICD_CTLR);

        /*
         * Set all global interrupts to the boot CPU only. ARE must be
         * enabled.
         */
        affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
        for (i = 32; i < gic_data.irq_nr; i++)
                gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
}

static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
{
        int ret = -ENODEV;
        int i;

        for (i = 0; i < gic_data.nr_redist_regions; i++) {
                void __iomem *ptr = gic_data.redist_regions[i].redist_base;
                u64 typer;
                u32 reg;

                reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
                if (reg != GIC_PIDR2_ARCH_GICv3 &&
                    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
                        pr_warn("No redistributor present @%p\n", ptr);
                        break;
                }

                do {
                        typer = gic_read_typer(ptr + GICR_TYPER);
                        ret = fn(gic_data.redist_regions + i, ptr);
                        if (!ret)
                                return 0;

                        if (gic_data.redist_regions[i].single_redist)
                                break;

                        if (gic_data.redist_stride) {
                                ptr += gic_data.redist_stride;
                        } else {
                                ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
                                if (typer & GICR_TYPER_VLPIS)
                                        ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
                        }
                } while (!(typer & GICR_TYPER_LAST));
        }

        return ret ? -ENODEV : 0;
}

static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
{
        unsigned long mpidr = cpu_logical_map(smp_processor_id());
        u64 typer;
        u32 aff;

        /*
         * Convert affinity to a 32bit value that can be matched to
         * GICR_TYPER bits [63:32].
         */
        aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
               MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
               MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
               MPIDR_AFFINITY_LEVEL(mpidr, 0));

        typer = gic_read_typer(ptr + GICR_TYPER);
        if ((typer >> 32) == aff) {
                u64 offset = ptr - region->redist_base;
                gic_data_rdist_rd_base() = ptr;
                gic_data_rdist()->phys_base = region->phys_base + offset;

                pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
                        smp_processor_id(), mpidr,
                        (int)(region - gic_data.redist_regions),
                        &gic_data_rdist()->phys_base);
                return 0;
        }

        /* Try next one */
        return 1;
}

static int gic_populate_rdist(void)
{
        if (gic_iterate_rdists(__gic_populate_rdist) == 0)
                return 0;

        /* We couldn't even deal with ourselves... */
        WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
             smp_processor_id(),
             (unsigned long)cpu_logical_map(smp_processor_id()));
        return -ENODEV;
}

static int __gic_update_vlpi_properties(struct redist_region *region,
                                        void __iomem *ptr)
{
        u64 typer = gic_read_typer(ptr + GICR_TYPER);
        gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
        gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS);

        return 1;
}

static void gic_update_vlpi_properties(void)
{
        gic_iterate_rdists(__gic_update_vlpi_properties);
        pr_info("%sVLPI support, %sdirect LPI support\n",
                !gic_data.rdists.has_vlpis ? "no " : "",
                !gic_data.rdists.has_direct_lpi ? "no " : "");
}

/* Check whether it's single security state view */
static inline bool gic_dist_security_disabled(void)
{
        return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
}

static void gic_cpu_sys_reg_init(void)
{
        int i, cpu = smp_processor_id();
        u64 mpidr = cpu_logical_map(cpu);
        u64 need_rss = MPIDR_RS(mpidr);
        bool group0;
        u32 pribits;

        /*
         * Need to check that the SRE bit has actually been set. If
         * not, it means that SRE is disabled at EL2. We're going to
         * die painfully, and there is nothing we can do about it.
         *
         * Kindly inform the luser.
         */
        if (!gic_enable_sre())
                pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");

        pribits = gic_get_pribits();

        group0 = gic_has_group0();

        /* Set priority mask register */
        if (!gic_prio_masking_enabled()) {
                write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
        } else {
                /*
                 * Mismatched configuration with the boot CPU: the
                 * system is likely to die as interrupt masking will
                 * not work properly on all CPUs
                 */
                WARN_ON(gic_supports_nmi() && group0 &&
                        !gic_dist_security_disabled());
        }

        /*
         * Some firmwares hand over to the kernel with the BPR changed from
         * its reset value (and with a value large enough to prevent
         * any pre-emptive interrupts from working at all). Writing a zero
         * to BPR restores its reset value.
         */
        gic_write_bpr1(0);

        if (static_branch_likely(&supports_deactivate_key)) {
                /* EOI drops priority only (mode 1) */
                gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
        } else {
                /* EOI deactivates interrupt too (mode 0) */
                gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
        }

        /* Always whack Group0 before Group1 */
        if (group0) {
                switch (pribits) {
                case 8:
                case 7:
                        write_gicreg(0, ICC_AP0R3_EL1);
                        write_gicreg(0, ICC_AP0R2_EL1);
                /* Fall through */
                case 6:
                        write_gicreg(0, ICC_AP0R1_EL1);
                /* Fall through */
                case 5:
                case 4:
                        write_gicreg(0, ICC_AP0R0_EL1);
                }

                isb();
        }

        switch (pribits) {
        case 8:
        case 7:
                write_gicreg(0, ICC_AP1R3_EL1);
                write_gicreg(0, ICC_AP1R2_EL1);
                /* Fall through */
        case 6:
                write_gicreg(0, ICC_AP1R1_EL1);
                /* Fall through */
        case 5:
        case 4:
                write_gicreg(0, ICC_AP1R0_EL1);
        }

        isb();

        /* ... and let's hit the road... */
        gic_write_grpen1(1);

        /* Keep the RSS capability status in per_cpu variable */
        per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);

        /* Check that all CPUs are capable of sending SGIs to other CPUs */
        for_each_online_cpu(i) {
                bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);

                need_rss |= MPIDR_RS(cpu_logical_map(i));
                if (need_rss && (!have_rss))
                        pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
                                cpu, (unsigned long)mpidr,
                                i, (unsigned long)cpu_logical_map(i));
        }

        /*
         * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
         * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
         * UNPREDICTABLE choice of:
         *   - The write is ignored.
         *   - The RS field is treated as 0.
         */
        if (need_rss && (!gic_data.has_rss))
                pr_crit_once("RSS is required but GICD doesn't support it\n");
}

static bool gicv3_nolpi;

static int __init gicv3_nolpi_cfg(char *buf)
{
        return strtobool(buf, &gicv3_nolpi);
}
early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);

static int gic_dist_supports_lpis(void)
{
        return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
                !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
                !gicv3_nolpi);
}

static void gic_cpu_init(void)
{
        void __iomem *rbase;

        /* Register ourselves with the rest of the world */
        if (gic_populate_rdist())
                return;

        gic_enable_redist(true);

        rbase = gic_data_rdist_sgi_base();

        /* Configure SGIs/PPIs as non-secure Group-1 */
        writel_relaxed(~0, rbase + GICR_IGROUPR0);

        gic_cpu_config(rbase, gic_redist_wait_for_rwp);

        /* initialise system registers */
        gic_cpu_sys_reg_init();
}

#ifdef CONFIG_SMP

#define MPIDR_TO_SGI_RS(mpidr)  (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
#define MPIDR_TO_SGI_CLUSTER_ID(mpidr)  ((mpidr) & ~0xFUL)
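
/*
 * A single ICC_SGI1R_EL1 write can only target the 16 CPUs whose
 * Aff0[3:0] differ: Aff0[3:0] indexes the 16-bit target list,
 * Aff0[7:4] is the range selector (RS), and the rest of the MPIDR
 * (everything but Aff0[3:0], as masked above) forms the cluster ID.
 */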

static int gic_starting_cpu(unsigned int cpu)
{
        gic_cpu_init();

        if (gic_dist_supports_lpis())
                its_cpu_init();

        return 0;
}

static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
                                   unsigned long cluster_id)
{
        int next_cpu, cpu = *base_cpu;
        unsigned long mpidr = cpu_logical_map(cpu);
        u16 tlist = 0;

        while (cpu < nr_cpu_ids) {
                tlist |= 1 << (mpidr & 0xf);

                next_cpu = cpumask_next(cpu, mask);
                if (next_cpu >= nr_cpu_ids)
                        goto out;
                cpu = next_cpu;

                mpidr = cpu_logical_map(cpu);

                if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
                        cpu--;
                        goto out;
                }
        }
out:
        *base_cpu = cpu;
        return tlist;
}

#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
        (MPIDR_AFFINITY_LEVEL(cluster_id, level) \
                << ICC_SGI1R_AFFINITY_## level ##_SHIFT)

static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
        u64 val;

        val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)     |
               MPIDR_TO_SGI_AFFINITY(cluster_id, 2)     |
               irq << ICC_SGI1R_SGI_ID_SHIFT            |
               MPIDR_TO_SGI_AFFINITY(cluster_id, 1)     |
               MPIDR_TO_SGI_RS(cluster_id)              |
               tlist << ICC_SGI1R_TARGET_LIST_SHIFT);

        pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
        gic_write_sgi1r(val);
}

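/*
 * One ICC_SGI1R_EL1 write reaches at most 16 CPUs, so the mask is walked
 * cluster by cluster: gic_compute_target_list() gathers the cluster's CPUs
 * into a target list and advances the iterator past them.
 */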
static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
        int cpu;

        if (WARN_ON(irq >= 16))
                return;

        /*
         * Ensure that stores to Normal memory are visible to the
         * other CPUs before issuing the IPI.
         */
        wmb();

        for_each_cpu(cpu, mask) {
                u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
                u16 tlist;

                tlist = gic_compute_target_list(&cpu, mask, cluster_id);
                gic_send_sgi(cluster_id, tlist, irq);
        }

        /* Force the above writes to ICC_SGI1R_EL1 to be executed */
        isb();
}

static void gic_smp_init(void)
{
        set_smp_cross_call(gic_raise_softirq);
        cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
                                  "irqchip/arm/gicv3:starting",
                                  gic_starting_cpu, NULL);
}

static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
{
        unsigned int cpu;
        void __iomem *reg;
        int enabled;
        u64 val;

        if (force)
                cpu = cpumask_first(mask_val);
        else
                cpu = cpumask_any_and(mask_val, cpu_online_mask);

        if (cpu >= nr_cpu_ids)
                return -EINVAL;

        if (gic_irq_in_rdist(d))
                return -EINVAL;

        /* If interrupt was enabled, disable it first */
        enabled = gic_peek_irq(d, GICD_ISENABLER);
        if (enabled)
                gic_mask_irq(d);

        reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
        val = gic_mpidr_to_affinity(cpu_logical_map(cpu));

        gic_write_irouter(val, reg);

        /*
         * If the interrupt was enabled, enable it again. Otherwise,
         * just wait for the distributor to have digested our changes.
         */
        if (enabled)
                gic_unmask_irq(d);
        else
                gic_dist_wait_for_rwp();

        irq_data_update_effective_affinity(d, cpumask_of(cpu));

        return IRQ_SET_MASK_OK_DONE;
}
#else
#define gic_set_affinity        NULL
#define gic_smp_init()          do { } while (0)
#endif

#ifdef CONFIG_CPU_PM
static int gic_cpu_pm_notifier(struct notifier_block *self,
                               unsigned long cmd, void *v)
{
        if (cmd == CPU_PM_EXIT) {
                if (gic_dist_security_disabled())
                        gic_enable_redist(true);
                gic_cpu_sys_reg_init();
        } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
                gic_write_grpen1(0);
                gic_enable_redist(false);
        }
        return NOTIFY_OK;
}

static struct notifier_block gic_cpu_pm_notifier_block = {
        .notifier_call = gic_cpu_pm_notifier,
};

static void gic_cpu_pm_init(void)
{
        cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
}

#else
static inline void gic_cpu_pm_init(void) { }
#endif /* CONFIG_CPU_PM */

static struct irq_chip gic_chip = {
        .name                   = "GICv3",
        .irq_mask               = gic_mask_irq,
        .irq_unmask             = gic_unmask_irq,
        .irq_eoi                = gic_eoi_irq,
        .irq_set_type           = gic_set_type,
        .irq_set_affinity       = gic_set_affinity,
        .irq_get_irqchip_state  = gic_irq_get_irqchip_state,
        .irq_set_irqchip_state  = gic_irq_set_irqchip_state,
        .irq_nmi_setup          = gic_irq_nmi_setup,
        .irq_nmi_teardown       = gic_irq_nmi_teardown,
        .flags                  = IRQCHIP_SET_TYPE_MASKED |
                                  IRQCHIP_SKIP_SET_WAKE |
                                  IRQCHIP_MASK_ON_SUSPEND,
};

static struct irq_chip gic_eoimode1_chip = {
        .name                   = "GICv3",
        .irq_mask               = gic_eoimode1_mask_irq,
        .irq_unmask             = gic_unmask_irq,
        .irq_eoi                = gic_eoimode1_eoi_irq,
        .irq_set_type           = gic_set_type,
        .irq_set_affinity       = gic_set_affinity,
        .irq_get_irqchip_state  = gic_irq_get_irqchip_state,
        .irq_set_irqchip_state  = gic_irq_set_irqchip_state,
        .irq_set_vcpu_affinity  = gic_irq_set_vcpu_affinity,
        .irq_nmi_setup          = gic_irq_nmi_setup,
        .irq_nmi_teardown       = gic_irq_nmi_teardown,
        .flags                  = IRQCHIP_SET_TYPE_MASKED |
                                  IRQCHIP_SKIP_SET_WAKE |
                                  IRQCHIP_MASK_ON_SUSPEND,
};

#define GIC_ID_NR       (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
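
/*
 * GIC_ID_NR is the number of implemented INTIDs, derived from the
 * GICD_TYPER.IDbits field (e.g. 65536 INTIDs for 16 ID bits).
 */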

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hw)
{
        struct irq_chip *chip = &gic_chip;

        if (static_branch_likely(&supports_deactivate_key))
                chip = &gic_eoimode1_chip;

        /* SGIs are private to the core kernel */
        if (hw < 16)
                return -EPERM;
        /* Nothing here */
        if (hw >= gic_data.irq_nr && hw < 8192)
                return -EPERM;
        /* Off limits */
        if (hw >= GIC_ID_NR)
                return -EPERM;

        /* PPIs */
        if (hw < 32) {
                irq_set_percpu_devid(irq);
                irq_domain_set_info(d, irq, hw, chip, d->host_data,
                                    handle_percpu_devid_irq, NULL, NULL);
                irq_set_status_flags(irq, IRQ_NOAUTOEN);
        }
        /* SPIs */
        if (hw >= 32 && hw < gic_data.irq_nr) {
                irq_domain_set_info(d, irq, hw, chip, d->host_data,
                                    handle_fasteoi_irq, NULL, NULL);
                irq_set_probe(irq);
                irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
        }
        /* LPIs */
        if (hw >= 8192 && hw < GIC_ID_NR) {
                if (!gic_dist_supports_lpis())
                        return -EPERM;
                irq_domain_set_info(d, irq, hw, chip, d->host_data,
                                    handle_fasteoi_irq, NULL, NULL);
        }

        return 0;
}

#define GIC_IRQ_TYPE_PARTITION  (GIC_IRQ_TYPE_LPI + 1)

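/*
 * Informative DT example: "interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;",
 * i.e. <0 23 4>, is translated below to hwirq 23 + 32 = 55 with a
 * level-high trigger.
 */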
static int gic_irq_domain_translate(struct irq_domain *d,
                                    struct irq_fwspec *fwspec,
                                    unsigned long *hwirq,
                                    unsigned int *type)
{
        if (is_of_node(fwspec->fwnode)) {
                if (fwspec->param_count < 3)
                        return -EINVAL;

                switch (fwspec->param[0]) {
                case 0:                 /* SPI */
                        *hwirq = fwspec->param[1] + 32;
                        break;
                case 1:                 /* PPI */
                case GIC_IRQ_TYPE_PARTITION:
                        *hwirq = fwspec->param[1] + 16;
                        break;
                case GIC_IRQ_TYPE_LPI:  /* LPI */
                        *hwirq = fwspec->param[1];
                        break;
                default:
                        return -EINVAL;
                }

                *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

                /*
                 * Make it clear that broken DTs are... broken.
                 * Partitioned PPIs are an unfortunate exception.
                 */
                WARN_ON(*type == IRQ_TYPE_NONE &&
                        fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
                return 0;
        }

        if (is_fwnode_irqchip(fwspec->fwnode)) {
                if (fwspec->param_count != 2)
                        return -EINVAL;

                *hwirq = fwspec->param[0];
                *type = fwspec->param[1];

                WARN_ON(*type == IRQ_TYPE_NONE);
                return 0;
        }

        return -EINVAL;
}

static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                unsigned int nr_irqs, void *arg)
{
        int i, ret;
        irq_hw_number_t hwirq;
        unsigned int type = IRQ_TYPE_NONE;
        struct irq_fwspec *fwspec = arg;

        ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
        if (ret)
                return ret;

        for (i = 0; i < nr_irqs; i++) {
                ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
                if (ret)
                        return ret;
        }

        return 0;
}

static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
                                unsigned int nr_irqs)
{
        int i;

        for (i = 0; i < nr_irqs; i++) {
                struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
                irq_set_handler(virq + i, NULL);
                irq_domain_reset_irq_data(d);
        }
}

static int gic_irq_domain_select(struct irq_domain *d,
                                 struct irq_fwspec *fwspec,
                                 enum irq_domain_bus_token bus_token)
{
        /* Not for us */
        if (fwspec->fwnode != d->fwnode)
                return 0;

        /* If this is not DT, then we have a single domain */
        if (!is_of_node(fwspec->fwnode))
                return 1;

        /*
         * If this is a PPI and we have a 4th (non-null) parameter,
         * then we need to match the partition domain.
         */
        if (fwspec->param_count >= 4 &&
            fwspec->param[0] == 1 && fwspec->param[3] != 0)
                return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]);

        return d == gic_data.domain;
}

static const struct irq_domain_ops gic_irq_domain_ops = {
        .translate = gic_irq_domain_translate,
        .alloc = gic_irq_domain_alloc,
        .free = gic_irq_domain_free,
        .select = gic_irq_domain_select,
};

static int partition_domain_translate(struct irq_domain *d,
                                      struct irq_fwspec *fwspec,
                                      unsigned long *hwirq,
                                      unsigned int *type)
{
        struct device_node *np;
        int ret;

        np = of_find_node_by_phandle(fwspec->param[3]);
        if (WARN_ON(!np))
                return -EINVAL;

        ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]],
                                     of_node_to_fwnode(np));
        if (ret < 0)
                return ret;

        *hwirq = ret;
        *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

        return 0;
}

static const struct irq_domain_ops partition_domain_ops = {
        .translate = partition_domain_translate,
        .select = gic_irq_domain_select,
};

static bool gic_enable_quirk_msm8996(void *data)
{
        struct gic_chip_data *d = data;

        d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996;

        return true;
}

static void gic_enable_nmi_support(void)
{
        int i;

        for (i = 0; i < 16; i++)
                refcount_set(&ppi_nmi_refs[i], 0);

        static_branch_enable(&supports_pseudo_nmis);

        if (static_branch_likely(&supports_deactivate_key))
                gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI;
        else
                gic_chip.flags |= IRQCHIP_SUPPORTS_NMI;
}

static int __init gic_init_bases(void __iomem *dist_base,
                                 struct redist_region *rdist_regs,
                                 u32 nr_redist_regions,
                                 u64 redist_stride,
                                 struct fwnode_handle *handle)
{
        u32 typer;
        int gic_irqs;
        int err;

        if (!is_hyp_mode_available())
                static_branch_disable(&supports_deactivate_key);

        if (static_branch_likely(&supports_deactivate_key))
                pr_info("GIC: Using split EOI/Deactivate mode\n");

        gic_data.fwnode = handle;
        gic_data.dist_base = dist_base;
        gic_data.redist_regions = rdist_regs;
        gic_data.nr_redist_regions = nr_redist_regions;
        gic_data.redist_stride = redist_stride;

        /*
         * Find out how many interrupts are supported.
         * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
         */
        typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
        gic_data.rdists.gicd_typer = typer;
        gic_irqs = GICD_TYPER_IRQS(typer);
        if (gic_irqs > 1020)
                gic_irqs = 1020;
        gic_data.irq_nr = gic_irqs;

        gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
                                                 &gic_data);
        irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
        gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
        gic_data.rdists.has_vlpis = true;
        gic_data.rdists.has_direct_lpi = true;

        if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
                err = -ENOMEM;
                goto out_free;
        }

        gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
        pr_info("Distributor has %sRange Selector support\n",
                gic_data.has_rss ? "" : "no ");

        if (typer & GICD_TYPER_MBIS) {
                err = mbi_init(handle, gic_data.domain);
                if (err)
                        pr_err("Failed to initialize MBIs\n");
        }

        set_handle_irq(gic_handle_irq);

        gic_update_vlpi_properties();

        gic_smp_init();
        gic_dist_init();
        gic_cpu_init();
        gic_cpu_pm_init();

        if (gic_dist_supports_lpis()) {
                its_init(handle, &gic_data.rdists, gic_data.domain);
                its_cpu_init();
        } else {
                if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
                        gicv2m_init(handle, gic_data.domain);
        }

        if (gic_prio_masking_enabled()) {
                if (!gic_has_group0() || gic_dist_security_disabled())
                        gic_enable_nmi_support();
                else
                        pr_warn("SCR_EL3.FIQ is cleared, cannot enable use of pseudo-NMIs\n");
        }

        return 0;

out_free:
        if (gic_data.domain)
                irq_domain_remove(gic_data.domain);
        free_percpu(gic_data.rdists.rdist);
        return err;
}

static int __init gic_validate_dist_version(void __iomem *dist_base)
{
        u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;

        if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
                return -ENODEV;

        return 0;
}

/* Create all possible partitions at boot time */
static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
{
        struct device_node *parts_node, *child_part;
        int part_idx = 0, i;
        int nr_parts;
        struct partition_affinity *parts;

        parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
        if (!parts_node)
                return;

        nr_parts = of_get_child_count(parts_node);

        if (!nr_parts)
                goto out_put_node;

        parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
        if (WARN_ON(!parts))
                goto out_put_node;

        for_each_child_of_node(parts_node, child_part) {
                struct partition_affinity *part;
                int n;

                part = &parts[part_idx];

                part->partition_id = of_node_to_fwnode(child_part);

                pr_info("GIC: PPI partition %pOFn[%d] { ",
                        child_part, part_idx);

                n = of_property_count_elems_of_size(child_part, "affinity",
                                                    sizeof(u32));
                WARN_ON(n <= 0);

                for (i = 0; i < n; i++) {
                        int err, cpu;
                        u32 cpu_phandle;
                        struct device_node *cpu_node;

                        err = of_property_read_u32_index(child_part, "affinity",
                                                         i, &cpu_phandle);
                        if (WARN_ON(err))
                                continue;

                        cpu_node = of_find_node_by_phandle(cpu_phandle);
                        if (WARN_ON(!cpu_node))
                                continue;

                        cpu = of_cpu_node_to_id(cpu_node);
                        if (WARN_ON(cpu < 0))
                                continue;

                        pr_cont("%pOF[%d] ", cpu_node, cpu);

                        cpumask_set_cpu(cpu, &part->mask);
                }

                pr_cont("}\n");
                part_idx++;
        }

        for (i = 0; i < 16; i++) {
                unsigned int irq;
                struct partition_desc *desc;
                struct irq_fwspec ppi_fwspec = {
                        .fwnode         = gic_data.fwnode,
                        .param_count    = 3,
                        .param          = {
                                [0]     = GIC_IRQ_TYPE_PARTITION,
                                [1]     = i,
                                [2]     = IRQ_TYPE_NONE,
                        },
                };

                irq = irq_create_fwspec_mapping(&ppi_fwspec);
                if (WARN_ON(!irq))
                        continue;
                desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
                                             irq, &partition_domain_ops);
                if (WARN_ON(!desc))
                        continue;

                gic_data.ppi_descs[i] = desc;
        }

out_put_node:
        of_node_put(parts_node);
}

static void __init gic_of_setup_kvm_info(struct device_node *node)
{
        int ret;
        struct resource r;
        u32 gicv_idx;

        gic_v3_kvm_info.type = GIC_V3;

        gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
        if (!gic_v3_kvm_info.maint_irq)
                return;

        if (of_property_read_u32(node, "#redistributor-regions",
                                 &gicv_idx))
                gicv_idx = 1;

        gicv_idx += 3;  /* Also skip GICD, GICC, GICH */
        ret = of_address_to_resource(node, gicv_idx, &r);
        if (!ret)
                gic_v3_kvm_info.vcpu = r;

        gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
        gic_set_kvm_info(&gic_v3_kvm_info);
}

static const struct gic_quirk gic_quirks[] = {
        {
                .desc   = "GICv3: Qualcomm MSM8996 broken firmware",
                .compatible = "qcom,msm8996-gic-v3",
                .init   = gic_enable_quirk_msm8996,
        },
        {
        }
};

static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
        void __iomem *dist_base;
        struct redist_region *rdist_regs;
        u64 redist_stride;
        u32 nr_redist_regions;
        int err, i;

        dist_base = of_iomap(node, 0);
        if (!dist_base) {
                pr_err("%pOF: unable to map gic dist registers\n", node);
                return -ENXIO;
        }

        err = gic_validate_dist_version(dist_base);
        if (err) {
                pr_err("%pOF: no distributor detected, giving up\n", node);
                goto out_unmap_dist;
        }

        if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
                nr_redist_regions = 1;

        rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs),
                             GFP_KERNEL);
        if (!rdist_regs) {
                err = -ENOMEM;
                goto out_unmap_dist;
        }

        for (i = 0; i < nr_redist_regions; i++) {
                struct resource res;
                int ret;

                ret = of_address_to_resource(node, 1 + i, &res);
                rdist_regs[i].redist_base = of_iomap(node, 1 + i);
                if (ret || !rdist_regs[i].redist_base) {
                        pr_err("%pOF: couldn't map region %d\n", node, i);
                        err = -ENODEV;
                        goto out_unmap_rdist;
                }
                rdist_regs[i].phys_base = res.start;
        }

        if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
                redist_stride = 0;

        gic_enable_of_quirks(node, gic_quirks, &gic_data);

        err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
                             redist_stride, &node->fwnode);
        if (err)
                goto out_unmap_rdist;

        gic_populate_ppi_partitions(node);

        if (static_branch_likely(&supports_deactivate_key))
                gic_of_setup_kvm_info(node);
        return 0;

out_unmap_rdist:
        for (i = 0; i < nr_redist_regions; i++)
                if (rdist_regs[i].redist_base)
                        iounmap(rdist_regs[i].redist_base);
        kfree(rdist_regs);
out_unmap_dist:
        iounmap(dist_base);
        return err;
}

IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
1574
1575#ifdef CONFIG_ACPI
1576static struct
1577{
1578        void __iomem *dist_base;
1579        struct redist_region *redist_regs;
1580        u32 nr_redist_regions;
1581        bool single_redist;
1582        u32 maint_irq;
1583        int maint_irq_mode;
1584        phys_addr_t vcpu_base;
1585} acpi_data __initdata;
1586
1587static void __init
1588gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
1589{
1590        static int count = 0;
1591
1592        acpi_data.redist_regs[count].phys_base = phys_base;
1593        acpi_data.redist_regs[count].redist_base = redist_base;
1594        acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
1595        count++;
1596}
1597
1598static int __init
1599gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
1600                           const unsigned long end)
1601{
1602        struct acpi_madt_generic_redistributor *redist =
1603                        (struct acpi_madt_generic_redistributor *)header;
1604        void __iomem *redist_base;
1605
1606        redist_base = ioremap(redist->base_address, redist->length);
1607        if (!redist_base) {
1608                pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
1609                return -ENOMEM;
1610        }
1611
1612        gic_acpi_register_redist(redist->base_address, redist_base);
1613        return 0;
1614}
1615
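/*
 * Map the redistributor of a single CPU described by a GICC subtable. GICC
 * entries carry no length, so derive it from the architecture revision:
 * GICv4 redistributors have four 64K frames per CPU instead of GICv3's two.
 */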
1616static int __init
1617gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
1618                         const unsigned long end)
1619{
1620        struct acpi_madt_generic_interrupt *gicc =
1621                                (struct acpi_madt_generic_interrupt *)header;
1622        u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
1623        u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
1624        void __iomem *redist_base;
1625
1626        /* A GICC entry with !ACPI_MADT_ENABLED is unusable, so skip it */
1627        if (!(gicc->flags & ACPI_MADT_ENABLED))
1628                return 0;
1629
1630        redist_base = ioremap(gicc->gicr_base_address, size);
1631        if (!redist_base)
1632                return -ENOMEM;
1633
1634        gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
1635        return 0;
1636}
1637
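/*
 * Walk the MADT again to map the redistributors, using the parser that
 * matches how they were discovered: GICC subtables for the single-redist
 * layout, GICR subtables otherwise.
 */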
1638static int __init gic_acpi_collect_gicr_base(void)
1639{
1640        acpi_tbl_entry_handler redist_parser;
1641        enum acpi_madt_type type;
1642
1643        if (acpi_data.single_redist) {
1644                type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
1645                redist_parser = gic_acpi_parse_madt_gicc;
1646        } else {
1647                type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
1648                redist_parser = gic_acpi_parse_madt_redist;
1649        }
1650
1651        /* Collect redistributor base addresses in GICR entries */
1652        if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
1653                return 0;
1654
1655        pr_info("No valid GICR entries exist\n");
1656        return -ENODEV;
1657}
1658
1659static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
1660                                  const unsigned long end)
1661{
1662        /* Subtable presence means that a redist exists; that's all we need */
1663        return 0;
1664}
1665
1666static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
1667                                      const unsigned long end)
1668{
1669        struct acpi_madt_generic_interrupt *gicc =
1670                                (struct acpi_madt_generic_interrupt *)header;
1671
1672        /*
1673         * If GICC is enabled and has a valid GICR base address, the GICR
1674         * base is presented via GICC.
1675         */
1676        if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
1677                return 0;
1678
1679        /*
1680         * It's perfectly valid for firmware to pass a disabled GICC entry;
1681         * don't treat it as an error, just skip it instead of failing the probe.
1682         */
1683        if (!(gicc->flags & ACPI_MADT_ENABLED))
1684                return 0;
1685
1686        return -ENODEV;
1687}
1688
1689static int __init gic_acpi_count_gicr_regions(void)
1690{
1691        int count;
1692
1693        /*
1694         * Count how many redistributor regions we have. Mixing
1695         * redistributor descriptions is not allowed: GICR and GICC
1696         * subtables have to be mutually exclusive.
1697         */
1698        count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
1699                                      gic_acpi_match_gicr, 0);
1700        if (count > 0) {
1701                acpi_data.single_redist = false;
1702                return count;
1703        }
1704
1705        count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
1706                                      gic_acpi_match_gicc, 0);
1707        if (count > 0)
1708                acpi_data.single_redist = true;
1709
1710        return count;
1711}
1712
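/*
 * Match callback for the IRQCHIP_ACPI_DECLARE() entries below: accept the
 * MADT only if the distributor version matches, and count the GICR regions
 * up front so gic_acpi_init() knows how much to allocate.
 */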
1713static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
1714                                           struct acpi_probe_entry *ape)
1715{
1716        struct acpi_madt_generic_distributor *dist;
1717        int count;
1718
1719        dist = (struct acpi_madt_generic_distributor *)header;
1720        if (dist->version != ape->driver_data)
1721                return false;
1722
1723        /* We need to do this exercise anyway, and the sooner the better */
1724        count = gic_acpi_count_gicr_regions();
1725        if (count <= 0)
1726                return false;
1727
1728        acpi_data.nr_redist_regions = count;
1729        return true;
1730}
1731
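/*
 * Collect the virtualization details (GICV base, maintenance interrupt and
 * its trigger mode) from the first enabled GICC entry, then check that
 * every other CPU reports the same values.
 */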
1732static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header,
1733                                                const unsigned long end)
1734{
1735        struct acpi_madt_generic_interrupt *gicc =
1736                (struct acpi_madt_generic_interrupt *)header;
1737        int maint_irq_mode;
1738        static bool first_madt = true;
1739
1740        /* Skip unusable CPUs */
1741        if (!(gicc->flags & ACPI_MADT_ENABLED))
1742                return 0;
1743
1744        maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
1745                ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
1746
1747        if (first_madt) {
1748                first_madt = false;
1749
1750                acpi_data.maint_irq = gicc->vgic_interrupt;
1751                acpi_data.maint_irq_mode = maint_irq_mode;
1752                acpi_data.vcpu_base = gicc->gicv_base_address;
1753
1754                return 0;
1755        }
1756
1757        /*
1758         * The maintenance interrupt and GICV should be the same for every CPU
1759         */
1760        if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
1761            (acpi_data.maint_irq_mode != maint_irq_mode) ||
1762            (acpi_data.vcpu_base != gicc->gicv_base_address))
1763                return -EINVAL;
1764
1765        return 0;
1766}
1767
1768static bool __init gic_acpi_collect_virt_info(void)
1769{
1770        int count;
1771
1772        count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
1773                                      gic_acpi_parse_virt_madt_gicc, 0);
1774
1775        return (count > 0);
1776}
1777
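/*
 * ACPI does not describe the size of these frames, so use the fixed sizes
 * the GIC architecture defines (the GICV frame keeps its GICv2 dimensions
 * for guest compatibility).
 */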
1778#define ACPI_GICV3_DIST_MEM_SIZE        (SZ_64K)
1779#define ACPI_GICV2_VCTRL_MEM_SIZE       (SZ_4K)
1780#define ACPI_GICV2_VCPU_MEM_SIZE        (SZ_8K)
1781
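/*
 * Advertise to KVM what it needs to drive the GIC virtualization
 * extensions: the maintenance interrupt (registered as a GSI), the
 * optional GICV frame for GICv2-compatible guests, and whether vLPIs
 * (GICv4) are available.
 */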
1782static void __init gic_acpi_setup_kvm_info(void)
1783{
1784        int irq;
1785
1786        if (!gic_acpi_collect_virt_info()) {
1787                pr_warn("Unable to get hardware information used for virtualization\n");
1788                return;
1789        }
1790
1791        gic_v3_kvm_info.type = GIC_V3;
1792
1793        irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
1794                                acpi_data.maint_irq_mode,
1795                                ACPI_ACTIVE_HIGH);
1796        if (irq <= 0)
1797                return;
1798
1799        gic_v3_kvm_info.maint_irq = irq;
1800
1801        if (acpi_data.vcpu_base) {
1802                struct resource *vcpu = &gic_v3_kvm_info.vcpu;
1803
1804                vcpu->flags = IORESOURCE_MEM;
1805                vcpu->start = acpi_data.vcpu_base;
1806                vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
1807        }
1808
1809        gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
1810        gic_set_kvm_info(&gic_v3_kvm_info);
1811}
1812
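/*
 * ACPI counterpart of gic_of_init(): map and validate the distributor,
 * allocate and populate the redistributor regions, build a fwnode handle
 * for the irq domain, then hand everything to gic_init_bases().
 */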
1813static int __init
1814gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
1815{
1816        struct acpi_madt_generic_distributor *dist;
1817        struct fwnode_handle *domain_handle;
1818        size_t size;
1819        int i, err;
1820
1821        /* Get distributor base address */
1822        dist = (struct acpi_madt_generic_distributor *)header;
1823        acpi_data.dist_base = ioremap(dist->base_address,
1824                                      ACPI_GICV3_DIST_MEM_SIZE);
1825        if (!acpi_data.dist_base) {
1826                pr_err("Unable to map GICD registers\n");
1827                return -ENOMEM;
1828        }
1829
1830        err = gic_validate_dist_version(acpi_data.dist_base);
1831        if (err) {
1832                pr_err("No distributor detected at @%p, giving up\n",
1833                       acpi_data.dist_base);
1834                goto out_dist_unmap;
1835        }
1836
1837        size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
1838        acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
1839        if (!acpi_data.redist_regs) {
1840                err = -ENOMEM;
1841                goto out_dist_unmap;
1842        }
1843
1844        err = gic_acpi_collect_gicr_base();
1845        if (err)
1846                goto out_redist_unmap;
1847
1848        domain_handle = irq_domain_alloc_fwnode(acpi_data.dist_base);
1849        if (!domain_handle) {
1850                err = -ENOMEM;
1851                goto out_redist_unmap;
1852        }
1853
1854        err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
1855                             acpi_data.nr_redist_regions, 0, domain_handle);
1856        if (err)
1857                goto out_fwhandle_free;
1858
1859        acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
1860
1861        if (static_branch_likely(&supports_deactivate_key))
1862                gic_acpi_setup_kvm_info();
1863
1864        return 0;
1865
1866out_fwhandle_free:
1867        irq_domain_free_fwnode(domain_handle);
1868out_redist_unmap:
1869        for (i = 0; i < acpi_data.nr_redist_regions; i++)
1870                if (acpi_data.redist_regs[i].redist_base)
1871                        iounmap(acpi_data.redist_regs[i].redist_base);
1872        kfree(acpi_data.redist_regs);
1873out_dist_unmap:
1874        iounmap(acpi_data.dist_base);
1875        return err;
1876}
1877IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
1878                     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
1879                     gic_acpi_init);
1880IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
1881                     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
1882                     gic_acpi_init);
1883IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
1884                     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
1885                     gic_acpi_init);
1886#endif
1887