linux/drivers/irqchip/irq-gic-v3.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
   4 * Author: Marc Zyngier <marc.zyngier@arm.com>
   5 */
   6
   7#define pr_fmt(fmt)     "GICv3: " fmt
   8
   9#include <linux/acpi.h>
  10#include <linux/cpu.h>
  11#include <linux/cpu_pm.h>
  12#include <linux/delay.h>
  13#include <linux/interrupt.h>
  14#include <linux/irqdomain.h>
  15#include <linux/of.h>
  16#include <linux/of_address.h>
  17#include <linux/of_irq.h>
  18#include <linux/percpu.h>
  19#include <linux/refcount.h>
  20#include <linux/slab.h>
  21
  22#include <linux/irqchip.h>
  23#include <linux/irqchip/arm-gic-common.h>
  24#include <linux/irqchip/arm-gic-v3.h>
  25#include <linux/irqchip/irq-partition-percpu.h>
  26
  27#include <asm/cputype.h>
  28#include <asm/exception.h>
  29#include <asm/smp_plat.h>
  30#include <asm/virt.h>
  31
  32#include "irq-gic-common.h"
  33
  34#define GICD_INT_NMI_PRI        (GICD_INT_DEF_PRI & ~0x80)
  35
  36#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996     (1ULL << 0)
  37
  38struct redist_region {
  39        void __iomem            *redist_base;
  40        phys_addr_t             phys_base;
  41        bool                    single_redist;
  42};
  43
  44struct gic_chip_data {
  45        struct fwnode_handle    *fwnode;
  46        void __iomem            *dist_base;
  47        struct redist_region    *redist_regions;
  48        struct rdists           rdists;
  49        struct irq_domain       *domain;
  50        u64                     redist_stride;
  51        u32                     nr_redist_regions;
  52        u64                     flags;
  53        bool                    has_rss;
  54        unsigned int            ppi_nr;
  55        struct partition_desc   **ppi_descs;
  56};
  57
  58static struct gic_chip_data gic_data __read_mostly;
  59static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
  60
  61#define GIC_ID_NR       (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
  62#define GIC_LINE_NR     min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
  63#define GIC_ESPI_NR     GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
  64
  65/*
  66 * The behaviours of RPR and PMR registers differ depending on the value of
  67 * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
  68 * distributor and redistributors depends on whether security is enabled in the
  69 * GIC.
  70 *
  71 * When security is enabled, non-secure priority values from the (re)distributor
   72 * are presented to the GIC CPUIF as follows:
   73 *     (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
   74 *
   75 * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
  76 * EL1 are subject to a similar operation thus matching the priorities presented
  77 * from the (re)distributor when security is enabled.
  78 *
  79 * see GICv3/GICv4 Architecture Specification (IHI0069D):
  80 * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
  81 *   priorities.
  82 * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
  83 *   interrupt.
  84 *
  85 * For now, we only support pseudo-NMIs if we have non-secure view of
  86 * priorities.
  87 */
  88static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
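
/*
 * Illustrative sketch only (hypothetical helper, not used by the driver):
 * the non-secure view of a (re)distributor priority described in the
 * comment above. With security enabled, a priority programmed as 0x20 is
 * presented to the CPU interface as (0x20 >> 1) | 0x80 == 0x90.
 */
static inline u8 __maybe_unused gic_dist_prio_ns_view(u8 prio)
{
	return (prio >> 1) | 0x80;
}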
  89
  90/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
  91static refcount_t *ppi_nmi_refs;
  92
  93static struct gic_kvm_info gic_v3_kvm_info;
  94static DEFINE_PER_CPU(bool, has_rss);
  95
  96#define MPIDR_RS(mpidr)                 (((mpidr) & 0xF0UL) >> 4)
  97#define gic_data_rdist()                (this_cpu_ptr(gic_data.rdists.rdist))
  98#define gic_data_rdist_rd_base()        (gic_data_rdist()->rd_base)
  99#define gic_data_rdist_sgi_base()       (gic_data_rdist_rd_base() + SZ_64K)
 100
 101/* Our default, arbitrary priority value. Linux only uses one anyway. */
 102#define DEFAULT_PMR_VALUE       0xf0
 103
 104enum gic_intid_range {
 105        PPI_RANGE,
 106        SPI_RANGE,
 107        EPPI_RANGE,
 108        ESPI_RANGE,
 109        LPI_RANGE,
 110        __INVALID_RANGE__
 111};
 112
 113static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
 114{
 115        switch (hwirq) {
 116        case 16 ... 31:
 117                return PPI_RANGE;
 118        case 32 ... 1019:
 119                return SPI_RANGE;
 120        case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63):
 121                return EPPI_RANGE;
 122        case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023):
 123                return ESPI_RANGE;
 124        case 8192 ... GENMASK(23, 0):
 125                return LPI_RANGE;
 126        default:
 127                return __INVALID_RANGE__;
 128        }
 129}
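
/*
 * Worked example, using the INTID assignments from
 * include/linux/irqchip/arm-gic-v3.h (EPPI_BASE_INTID == 1056,
 * ESPI_BASE_INTID == 4096): hwirq 27 is a PPI, hwirq 1060 is the fifth
 * extended PPI, and hwirq 8192 is the first LPI.
 */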
 130
 131static enum gic_intid_range get_intid_range(struct irq_data *d)
 132{
 133        return __get_intid_range(d->hwirq);
 134}
 135
 136static inline unsigned int gic_irq(struct irq_data *d)
 137{
 138        return d->hwirq;
 139}
 140
 141static inline int gic_irq_in_rdist(struct irq_data *d)
 142{
 143        enum gic_intid_range range = get_intid_range(d);
 144        return range == PPI_RANGE || range == EPPI_RANGE;
 145}
 146
 147static inline void __iomem *gic_dist_base(struct irq_data *d)
 148{
 149        switch (get_intid_range(d)) {
 150        case PPI_RANGE:
 151        case EPPI_RANGE:
 152                /* SGI+PPI -> SGI_base for this CPU */
 153                return gic_data_rdist_sgi_base();
 154
 155        case SPI_RANGE:
 156        case ESPI_RANGE:
 157                /* SPI -> dist_base */
 158                return gic_data.dist_base;
 159
 160        default:
 161                return NULL;
 162        }
 163}
 164
 165static void gic_do_wait_for_rwp(void __iomem *base)
 166{
 167        u32 count = 1000000;    /* 1s! */
 168
 169        while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
 170                count--;
 171                if (!count) {
 172                        pr_err_ratelimited("RWP timeout, gone fishing\n");
 173                        return;
 174                }
 175                cpu_relax();
 176                udelay(1);
  177        }
 178}
 179
 180/* Wait for completion of a distributor change */
 181static void gic_dist_wait_for_rwp(void)
 182{
 183        gic_do_wait_for_rwp(gic_data.dist_base);
 184}
 185
 186/* Wait for completion of a redistributor change */
 187static void gic_redist_wait_for_rwp(void)
 188{
 189        gic_do_wait_for_rwp(gic_data_rdist_rd_base());
 190}
 191
 192#ifdef CONFIG_ARM64
 193
 194static u64 __maybe_unused gic_read_iar(void)
 195{
 196        if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
 197                return gic_read_iar_cavium_thunderx();
 198        else
 199                return gic_read_iar_common();
 200}
 201#endif
 202
 203static void gic_enable_redist(bool enable)
 204{
 205        void __iomem *rbase;
 206        u32 count = 1000000;    /* 1s! */
 207        u32 val;
 208
 209        if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
 210                return;
 211
 212        rbase = gic_data_rdist_rd_base();
 213
 214        val = readl_relaxed(rbase + GICR_WAKER);
 215        if (enable)
 216                /* Wake up this CPU redistributor */
 217                val &= ~GICR_WAKER_ProcessorSleep;
 218        else
 219                val |= GICR_WAKER_ProcessorSleep;
 220        writel_relaxed(val, rbase + GICR_WAKER);
 221
 222        if (!enable) {          /* Check that GICR_WAKER is writeable */
 223                val = readl_relaxed(rbase + GICR_WAKER);
 224                if (!(val & GICR_WAKER_ProcessorSleep))
 225                        return; /* No PM support in this redistributor */
 226        }
 227
 228        while (--count) {
 229                val = readl_relaxed(rbase + GICR_WAKER);
 230                if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
 231                        break;
 232                cpu_relax();
 233                udelay(1);
  234        }
 235        if (!count)
 236                pr_err_ratelimited("redistributor failed to %s...\n",
 237                                   enable ? "wakeup" : "sleep");
 238}
 239
 240/*
 241 * Routines to disable, enable, EOI and route interrupts
 242 */
 243static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
 244{
 245        switch (get_intid_range(d)) {
 246        case PPI_RANGE:
 247        case SPI_RANGE:
 248                *index = d->hwirq;
 249                return offset;
 250        case EPPI_RANGE:
 251                /*
 252                 * Contrary to the ESPI range, the EPPI range is contiguous
 253                 * to the PPI range in the registers, so let's adjust the
 254                 * displacement accordingly. Consistency is overrated.
 255                 */
 256                *index = d->hwirq - EPPI_BASE_INTID + 32;
 257                return offset;
 258        case ESPI_RANGE:
 259                *index = d->hwirq - ESPI_BASE_INTID;
 260                switch (offset) {
 261                case GICD_ISENABLER:
 262                        return GICD_ISENABLERnE;
 263                case GICD_ICENABLER:
 264                        return GICD_ICENABLERnE;
 265                case GICD_ISPENDR:
 266                        return GICD_ISPENDRnE;
 267                case GICD_ICPENDR:
 268                        return GICD_ICPENDRnE;
 269                case GICD_ISACTIVER:
 270                        return GICD_ISACTIVERnE;
 271                case GICD_ICACTIVER:
 272                        return GICD_ICACTIVERnE;
 273                case GICD_IPRIORITYR:
 274                        return GICD_IPRIORITYRnE;
 275                case GICD_ICFGR:
 276                        return GICD_ICFGRnE;
 277                case GICD_IROUTER:
 278                        return GICD_IROUTERnE;
 279                default:
 280                        break;
 281                }
 282                break;
 283        default:
 284                break;
 285        }
 286
 287        WARN_ON(1);
 288        *index = d->hwirq;
 289        return offset;
 290}
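
/*
 * Worked example for the EPPI adjustment above: with EPPI_BASE_INTID at
 * 1056, hwirq 1056 (the first extended PPI) yields index
 * 1056 - 1056 + 32 == 32, placing it right after the 32 SGIs/PPIs in the
 * same register frame, whereas ESPIs get dedicated GICD_*nE registers
 * with an index relative to ESPI_BASE_INTID.
 */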
 291
 292static int gic_peek_irq(struct irq_data *d, u32 offset)
 293{
 294        void __iomem *base;
 295        u32 index, mask;
 296
 297        offset = convert_offset_index(d, offset, &index);
 298        mask = 1 << (index % 32);
 299
 300        if (gic_irq_in_rdist(d))
 301                base = gic_data_rdist_sgi_base();
 302        else
 303                base = gic_data.dist_base;
 304
 305        return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask);
 306}
 307
 308static void gic_poke_irq(struct irq_data *d, u32 offset)
 309{
 310        void (*rwp_wait)(void);
 311        void __iomem *base;
 312        u32 index, mask;
 313
 314        offset = convert_offset_index(d, offset, &index);
 315        mask = 1 << (index % 32);
 316
 317        if (gic_irq_in_rdist(d)) {
 318                base = gic_data_rdist_sgi_base();
 319                rwp_wait = gic_redist_wait_for_rwp;
 320        } else {
 321                base = gic_data.dist_base;
 322                rwp_wait = gic_dist_wait_for_rwp;
 323        }
 324
 325        writel_relaxed(mask, base + offset + (index / 32) * 4);
 326        rwp_wait();
 327}
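
/*
 * Example of the addressing used by gic_peek_irq()/gic_poke_irq(): these
 * GICD registers carry one bit per interrupt, 32 per 32-bit register, so
 * index 50 lands in bit 50 % 32 == 18 of the word at
 * offset + (50 / 32) * 4 == offset + 4.
 */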
 328
 329static void gic_mask_irq(struct irq_data *d)
 330{
 331        gic_poke_irq(d, GICD_ICENABLER);
 332}
 333
 334static void gic_eoimode1_mask_irq(struct irq_data *d)
 335{
 336        gic_mask_irq(d);
 337        /*
 338         * When masking a forwarded interrupt, make sure it is
 339         * deactivated as well.
 340         *
 341         * This ensures that an interrupt that is getting
 342         * disabled/masked will not get "stuck", because there is
  343         * no one to deactivate it (guest is being terminated).
 344         */
 345        if (irqd_is_forwarded_to_vcpu(d))
 346                gic_poke_irq(d, GICD_ICACTIVER);
 347}
 348
 349static void gic_unmask_irq(struct irq_data *d)
 350{
 351        gic_poke_irq(d, GICD_ISENABLER);
 352}
 353
 354static inline bool gic_supports_nmi(void)
 355{
 356        return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
 357               static_branch_likely(&supports_pseudo_nmis);
 358}
 359
 360static int gic_irq_set_irqchip_state(struct irq_data *d,
 361                                     enum irqchip_irq_state which, bool val)
 362{
 363        u32 reg;
 364
 365        if (d->hwirq >= 8192) /* PPI/SPI only */
 366                return -EINVAL;
 367
 368        switch (which) {
 369        case IRQCHIP_STATE_PENDING:
 370                reg = val ? GICD_ISPENDR : GICD_ICPENDR;
 371                break;
 372
 373        case IRQCHIP_STATE_ACTIVE:
 374                reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
 375                break;
 376
 377        case IRQCHIP_STATE_MASKED:
 378                reg = val ? GICD_ICENABLER : GICD_ISENABLER;
 379                break;
 380
 381        default:
 382                return -EINVAL;
 383        }
 384
 385        gic_poke_irq(d, reg);
 386        return 0;
 387}
 388
 389static int gic_irq_get_irqchip_state(struct irq_data *d,
 390                                     enum irqchip_irq_state which, bool *val)
 391{
 392        if (d->hwirq >= 8192) /* PPI/SPI only */
 393                return -EINVAL;
 394
 395        switch (which) {
 396        case IRQCHIP_STATE_PENDING:
 397                *val = gic_peek_irq(d, GICD_ISPENDR);
 398                break;
 399
 400        case IRQCHIP_STATE_ACTIVE:
 401                *val = gic_peek_irq(d, GICD_ISACTIVER);
 402                break;
 403
 404        case IRQCHIP_STATE_MASKED:
 405                *val = !gic_peek_irq(d, GICD_ISENABLER);
 406                break;
 407
 408        default:
 409                return -EINVAL;
 410        }
 411
 412        return 0;
 413}
 414
 415static void gic_irq_set_prio(struct irq_data *d, u8 prio)
 416{
 417        void __iomem *base = gic_dist_base(d);
 418        u32 offset, index;
 419
 420        offset = convert_offset_index(d, GICD_IPRIORITYR, &index);
 421
 422        writeb_relaxed(prio, base + offset + index);
 423}
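
/*
 * GICD_IPRIORITYR holds one priority byte per interrupt, which is why a
 * single writeb_relaxed() at offset + index is enough; e.g. SPI hwirq 34
 * has its priority byte at GICD_IPRIORITYR + 34.
 */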
 424
 425static u32 gic_get_ppi_index(struct irq_data *d)
 426{
 427        switch (get_intid_range(d)) {
 428        case PPI_RANGE:
 429                return d->hwirq - 16;
 430        case EPPI_RANGE:
 431                return d->hwirq - EPPI_BASE_INTID + 16;
 432        default:
 433                unreachable();
 434        }
 435}
 436
 437static int gic_irq_nmi_setup(struct irq_data *d)
 438{
 439        struct irq_desc *desc = irq_to_desc(d->irq);
 440
 441        if (!gic_supports_nmi())
 442                return -EINVAL;
 443
 444        if (gic_peek_irq(d, GICD_ISENABLER)) {
 445                pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
 446                return -EINVAL;
 447        }
 448
 449        /*
  450         * A secondary irq_chip should be in charge of LPI requests;
  451         * it should not be possible to get here.
 452         */
 453        if (WARN_ON(gic_irq(d) >= 8192))
 454                return -EINVAL;
 455
 456        /* desc lock should already be held */
 457        if (gic_irq_in_rdist(d)) {
 458                u32 idx = gic_get_ppi_index(d);
 459
 460                /* Setting up PPI as NMI, only switch handler for first NMI */
 461                if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) {
 462                        refcount_set(&ppi_nmi_refs[idx], 1);
 463                        desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
 464                }
 465        } else {
 466                desc->handle_irq = handle_fasteoi_nmi;
 467        }
 468
 469        gic_irq_set_prio(d, GICD_INT_NMI_PRI);
 470
 471        return 0;
 472}
 473
 474static void gic_irq_nmi_teardown(struct irq_data *d)
 475{
 476        struct irq_desc *desc = irq_to_desc(d->irq);
 477
 478        if (WARN_ON(!gic_supports_nmi()))
 479                return;
 480
 481        if (gic_peek_irq(d, GICD_ISENABLER)) {
 482                pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
 483                return;
 484        }
 485
 486        /*
  487         * A secondary irq_chip should be in charge of LPI requests;
  488         * it should not be possible to get here.
 489         */
 490        if (WARN_ON(gic_irq(d) >= 8192))
 491                return;
 492
 493        /* desc lock should already be held */
 494        if (gic_irq_in_rdist(d)) {
 495                u32 idx = gic_get_ppi_index(d);
 496
 497                /* Tearing down NMI, only switch handler for last NMI */
 498                if (refcount_dec_and_test(&ppi_nmi_refs[idx]))
 499                        desc->handle_irq = handle_percpu_devid_irq;
 500        } else {
 501                desc->handle_irq = handle_fasteoi_irq;
 502        }
 503
 504        gic_irq_set_prio(d, GICD_INT_DEF_PRI);
 505}
 506
 507static void gic_eoi_irq(struct irq_data *d)
 508{
 509        gic_write_eoir(gic_irq(d));
 510}
 511
 512static void gic_eoimode1_eoi_irq(struct irq_data *d)
 513{
 514        /*
 515         * No need to deactivate an LPI, or an interrupt that
  516         * is getting forwarded to a vcpu.
 517         */
 518        if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
 519                return;
 520        gic_write_dir(gic_irq(d));
 521}
 522
 523static int gic_set_type(struct irq_data *d, unsigned int type)
 524{
 525        enum gic_intid_range range;
 526        unsigned int irq = gic_irq(d);
 527        void (*rwp_wait)(void);
 528        void __iomem *base;
 529        u32 offset, index;
 530        int ret;
 531
 532        /* Interrupt configuration for SGIs can't be changed */
 533        if (irq < 16)
 534                return -EINVAL;
 535
 536        range = get_intid_range(d);
 537
 538        /* SPIs have restrictions on the supported types */
 539        if ((range == SPI_RANGE || range == ESPI_RANGE) &&
 540            type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
 541                return -EINVAL;
 542
 543        if (gic_irq_in_rdist(d)) {
 544                base = gic_data_rdist_sgi_base();
 545                rwp_wait = gic_redist_wait_for_rwp;
 546        } else {
 547                base = gic_data.dist_base;
 548                rwp_wait = gic_dist_wait_for_rwp;
 549        }
 550
 551        offset = convert_offset_index(d, GICD_ICFGR, &index);
 552
 553        ret = gic_configure_irq(index, type, base + offset, rwp_wait);
 554        if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
 555                /* Misconfigured PPIs are usually not fatal */
 556                pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq);
 557                ret = 0;
 558        }
 559
 560        return ret;
 561}
 562
 563static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
 564{
 565        if (vcpu)
 566                irqd_set_forwarded_to_vcpu(d);
 567        else
 568                irqd_clr_forwarded_to_vcpu(d);
 569        return 0;
 570}
 571
 572static u64 gic_mpidr_to_affinity(unsigned long mpidr)
 573{
 574        u64 aff;
 575
 576        aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
 577               MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
 578               MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
 579               MPIDR_AFFINITY_LEVEL(mpidr, 0));
 580
 581        return aff;
 582}
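
/*
 * Example: an MPIDR with Aff3.Aff2.Aff1.Aff0 == 1.0.2.3 packs to
 * 0x0000000100000203, matching the GICD_IROUTER layout (Aff3 in bits
 * [39:32], Aff2 in [23:16], Aff1 in [15:8], Aff0 in [7:0]).
 */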
 583
 584static void gic_deactivate_unhandled(u32 irqnr)
 585{
 586        if (static_branch_likely(&supports_deactivate_key)) {
 587                if (irqnr < 8192)
 588                        gic_write_dir(irqnr);
 589        } else {
 590                gic_write_eoir(irqnr);
 591        }
 592}
 593
 594static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
 595{
 596        bool irqs_enabled = interrupts_enabled(regs);
 597        int err;
 598
 599        if (irqs_enabled)
 600                nmi_enter();
 601
 602        if (static_branch_likely(&supports_deactivate_key))
 603                gic_write_eoir(irqnr);
 604        /*
  605         * Leave the PSR.I bit set to prevent other NMIs from being
  606         * received while handling this one.
 607         * PSR.I will be restored when we ERET to the
 608         * interrupted context.
 609         */
 610        err = handle_domain_nmi(gic_data.domain, irqnr, regs);
 611        if (err)
 612                gic_deactivate_unhandled(irqnr);
 613
 614        if (irqs_enabled)
 615                nmi_exit();
 616}
 617
 618static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
 619{
 620        u32 irqnr;
 621
 622        irqnr = gic_read_iar();
 623
 624        if (gic_supports_nmi() &&
 625            unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) {
 626                gic_handle_nmi(irqnr, regs);
 627                return;
 628        }
 629
 630        if (gic_prio_masking_enabled()) {
 631                gic_pmr_mask_irqs();
 632                gic_arch_enable_irqs();
 633        }
 634
 635        /* Check for special IDs first */
  636        if (irqnr >= 1020 && irqnr <= 1023)
 637                return;
 638
 639        /* Treat anything but SGIs in a uniform way */
 640        if (likely(irqnr > 15)) {
 641                int err;
 642
 643                if (static_branch_likely(&supports_deactivate_key))
 644                        gic_write_eoir(irqnr);
 645                else
 646                        isb();
 647
 648                err = handle_domain_irq(gic_data.domain, irqnr, regs);
 649                if (err) {
 650                        WARN_ONCE(true, "Unexpected interrupt received!\n");
 651                        gic_deactivate_unhandled(irqnr);
 652                }
 653                return;
 654        }
 655        if (irqnr < 16) {
 656                gic_write_eoir(irqnr);
 657                if (static_branch_likely(&supports_deactivate_key))
 658                        gic_write_dir(irqnr);
 659#ifdef CONFIG_SMP
 660                /*
 661                 * Unlike GICv2, we don't need an smp_rmb() here.
 662                 * The control dependency from gic_read_iar to
 663                 * the ISB in gic_write_eoir is enough to ensure
 664                 * that any shared data read by handle_IPI will
 665                 * be read after the ACK.
 666                 */
 667                handle_IPI(irqnr, regs);
 668#else
 669                WARN_ONCE(true, "Unexpected SGI received!\n");
 670#endif
 671        }
 672}
 673
 674static u32 gic_get_pribits(void)
 675{
 676        u32 pribits;
 677
 678        pribits = gic_read_ctlr();
 679        pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
 680        pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
 681        pribits++;
 682
 683        return pribits;
 684}
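
/*
 * Example: an ICC_CTLR_EL1.PRIbits field reading 4 means 4 + 1 == 5
 * implemented priority bits, i.e. the CPU interface supports the 32
 * priorities 0x00, 0x08, ..., 0xf8.
 */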
 685
 686static bool gic_has_group0(void)
 687{
 688        u32 val;
 689        u32 old_pmr;
 690
 691        old_pmr = gic_read_pmr();
 692
 693        /*
 694         * Let's find out if Group0 is under control of EL3 or not by
 695         * setting the highest possible, non-zero priority in PMR.
 696         *
 697         * If SCR_EL3.FIQ is set, the priority gets shifted down in
 698         * order for the CPU interface to set bit 7, and keep the
 699         * actual priority in the non-secure range. In the process, it
  700         * loses the least significant bit and the actual priority
  701         * becomes 0x80. Reading it back returns 0, indicating that
  702         * we don't have access to Group0.
 703         */
 704        gic_write_pmr(BIT(8 - gic_get_pribits()));
 705        val = gic_read_pmr();
 706
 707        gic_write_pmr(old_pmr);
 708
 709        return val != 0;
 710}
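
/*
 * Worked example of the probe above, assuming pribits == 5: we write
 * BIT(3) == 0x08, the lowest non-zero priority for that implementation.
 * If EL3 owns Group0, the write is regrouped to (0x08 >> 1) | 0x80 ==
 * 0x84, the unimplemented 0x04 bit is lost and 0x80 is stored, whose
 * non-secure readback is 0.
 */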
 711
 712static void __init gic_dist_init(void)
 713{
 714        unsigned int i;
 715        u64 affinity;
 716        void __iomem *base = gic_data.dist_base;
 717
 718        /* Disable the distributor */
 719        writel_relaxed(0, base + GICD_CTLR);
 720        gic_dist_wait_for_rwp();
 721
 722        /*
 723         * Configure SPIs as non-secure Group-1. This will only matter
 724         * if the GIC only has a single security state. This will not
 725         * do the right thing if the kernel is running in secure mode,
 726         * but that's not the intended use case anyway.
 727         */
 728        for (i = 32; i < GIC_LINE_NR; i += 32)
 729                writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
 730
 731        /* Extended SPI range, not handled by the GICv2/GICv3 common code */
 732        for (i = 0; i < GIC_ESPI_NR; i += 32) {
 733                writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
 734                writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
 735        }
 736
 737        for (i = 0; i < GIC_ESPI_NR; i += 32)
 738                writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);
 739
 740        for (i = 0; i < GIC_ESPI_NR; i += 16)
 741                writel_relaxed(0, base + GICD_ICFGRnE + i / 4);
 742
 743        for (i = 0; i < GIC_ESPI_NR; i += 4)
 744                writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i);
 745
 746        /* Now do the common stuff, and wait for the distributor to drain */
 747        gic_dist_config(base, GIC_LINE_NR, gic_dist_wait_for_rwp);
 748
 749        /* Enable distributor with ARE, Group1 */
 750        writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
 751                       base + GICD_CTLR);
 752
 753        /*
 754         * Set all global interrupts to the boot CPU only. ARE must be
 755         * enabled.
 756         */
 757        affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
 758        for (i = 32; i < GIC_LINE_NR; i++)
 759                gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
 760
 761        for (i = 0; i < GIC_ESPI_NR; i++)
 762                gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
 763}
 764
 765static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
 766{
 767        int ret = -ENODEV;
 768        int i;
 769
 770        for (i = 0; i < gic_data.nr_redist_regions; i++) {
 771                void __iomem *ptr = gic_data.redist_regions[i].redist_base;
 772                u64 typer;
 773                u32 reg;
 774
 775                reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
 776                if (reg != GIC_PIDR2_ARCH_GICv3 &&
 777                    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
 778                        pr_warn("No redistributor present @%p\n", ptr);
 779                        break;
 780                }
 781
 782                do {
 783                        typer = gic_read_typer(ptr + GICR_TYPER);
 784                        ret = fn(gic_data.redist_regions + i, ptr);
 785                        if (!ret)
 786                                return 0;
 787
 788                        if (gic_data.redist_regions[i].single_redist)
 789                                break;
 790
 791                        if (gic_data.redist_stride) {
 792                                ptr += gic_data.redist_stride;
 793                        } else {
 794                                ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
 795                                if (typer & GICR_TYPER_VLPIS)
 796                                        ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
 797                        }
 798                } while (!(typer & GICR_TYPER_LAST));
 799        }
 800
 801        return ret ? -ENODEV : 0;
 802}
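
/*
 * Example of the implicit stride above: a redistributor with VLPI
 * support (GICR_TYPER.VLPIS set) occupies four 64K frames (RD_base,
 * SGI_base, VLPI_base and a reserved page), so without an explicit
 * redist_stride the iterator advances 0x40000 bytes per CPU instead of
 * 0x20000.
 */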
 803
 804static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
 805{
 806        unsigned long mpidr = cpu_logical_map(smp_processor_id());
 807        u64 typer;
 808        u32 aff;
 809
 810        /*
 811         * Convert affinity to a 32bit value that can be matched to
 812         * GICR_TYPER bits [63:32].
 813         */
 814        aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
 815               MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
 816               MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
 817               MPIDR_AFFINITY_LEVEL(mpidr, 0));
 818
 819        typer = gic_read_typer(ptr + GICR_TYPER);
 820        if ((typer >> 32) == aff) {
 821                u64 offset = ptr - region->redist_base;
 822                gic_data_rdist_rd_base() = ptr;
 823                gic_data_rdist()->phys_base = region->phys_base + offset;
 824
 825                pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
 826                        smp_processor_id(), mpidr,
 827                        (int)(region - gic_data.redist_regions),
 828                        &gic_data_rdist()->phys_base);
 829                return 0;
 830        }
 831
 832        /* Try next one */
 833        return 1;
 834}
 835
 836static int gic_populate_rdist(void)
 837{
 838        if (gic_iterate_rdists(__gic_populate_rdist) == 0)
 839                return 0;
 840
 841        /* We couldn't even deal with ourselves... */
 842        WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
 843             smp_processor_id(),
 844             (unsigned long)cpu_logical_map(smp_processor_id()));
 845        return -ENODEV;
 846}
 847
 848static int __gic_update_rdist_properties(struct redist_region *region,
 849                                         void __iomem *ptr)
 850{
 851        u64 typer = gic_read_typer(ptr + GICR_TYPER);
 852        gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
 853        gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS);
 854        gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);
 855
 856        return 1;
 857}
 858
 859static void gic_update_rdist_properties(void)
 860{
 861        gic_data.ppi_nr = UINT_MAX;
 862        gic_iterate_rdists(__gic_update_rdist_properties);
 863        if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
 864                gic_data.ppi_nr = 0;
 865        pr_info("%d PPIs implemented\n", gic_data.ppi_nr);
 866        pr_info("%sVLPI support, %sdirect LPI support\n",
 867                !gic_data.rdists.has_vlpis ? "no " : "",
 868                !gic_data.rdists.has_direct_lpi ? "no " : "");
 869}
 870
  871/* Check whether the GIC has a single security state view */
 872static inline bool gic_dist_security_disabled(void)
 873{
 874        return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
 875}
 876
 877static void gic_cpu_sys_reg_init(void)
 878{
 879        int i, cpu = smp_processor_id();
 880        u64 mpidr = cpu_logical_map(cpu);
 881        u64 need_rss = MPIDR_RS(mpidr);
 882        bool group0;
 883        u32 pribits;
 884
 885        /*
 886         * Need to check that the SRE bit has actually been set. If
 887         * not, it means that SRE is disabled at EL2. We're going to
 888         * die painfully, and there is nothing we can do about it.
 889         *
 890         * Kindly inform the luser.
 891         */
 892        if (!gic_enable_sre())
 893                pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
 894
 895        pribits = gic_get_pribits();
 896
 897        group0 = gic_has_group0();
 898
 899        /* Set priority mask register */
 900        if (!gic_prio_masking_enabled()) {
 901                write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
 902        } else {
 903                /*
  904                 * Mismatched configuration with the boot CPU: the system is
  905                 * likely to die as interrupt masking will not work properly on
  906                 * all CPUs
 907                 */
 908                WARN_ON(gic_supports_nmi() && group0 &&
 909                        !gic_dist_security_disabled());
 910        }
 911
 912        /*
 913         * Some firmwares hand over to the kernel with the BPR changed from
 914         * its reset value (and with a value large enough to prevent
 915         * any pre-emptive interrupts from working at all). Writing a zero
  916         * to BPR restores its reset value.
 917         */
 918        gic_write_bpr1(0);
 919
 920        if (static_branch_likely(&supports_deactivate_key)) {
 921                /* EOI drops priority only (mode 1) */
 922                gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
 923        } else {
 924                /* EOI deactivates interrupt too (mode 0) */
 925                gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
 926        }
 927
 928        /* Always whack Group0 before Group1 */
 929        if (group0) {
  930                switch (pribits) {
 931                case 8:
 932                case 7:
 933                        write_gicreg(0, ICC_AP0R3_EL1);
 934                        write_gicreg(0, ICC_AP0R2_EL1);
 935                /* Fall through */
 936                case 6:
 937                        write_gicreg(0, ICC_AP0R1_EL1);
 938                /* Fall through */
 939                case 5:
 940                case 4:
 941                        write_gicreg(0, ICC_AP0R0_EL1);
 942                }
 943
 944                isb();
 945        }
 946
  947        switch (pribits) {
 948        case 8:
 949        case 7:
 950                write_gicreg(0, ICC_AP1R3_EL1);
 951                write_gicreg(0, ICC_AP1R2_EL1);
 952                /* Fall through */
 953        case 6:
 954                write_gicreg(0, ICC_AP1R1_EL1);
 955                /* Fall through */
 956        case 5:
 957        case 4:
 958                write_gicreg(0, ICC_AP1R0_EL1);
 959        }
 960
 961        isb();
 962
 963        /* ... and let's hit the road... */
 964        gic_write_grpen1(1);
 965
  966        /* Keep the RSS capability status in a per-CPU variable */
 967        per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);
 968
  969        /* Check that all the CPUs are capable of sending SGIs to the other CPUs */
 970        for_each_online_cpu(i) {
 971                bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);
 972
 973                need_rss |= MPIDR_RS(cpu_logical_map(i));
 974                if (need_rss && (!have_rss))
 975                        pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
 976                                cpu, (unsigned long)mpidr,
 977                                i, (unsigned long)cpu_logical_map(i));
 978        }
 979
  980        /*
  981         * The GIC spec says that when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
  982         * writing the ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
  983         * UNPREDICTABLE choice of:
 984         *   - The write is ignored.
 985         *   - The RS field is treated as 0.
 986         */
 987        if (need_rss && (!gic_data.has_rss))
 988                pr_crit_once("RSS is required but GICD doesn't support it\n");
 989}
 990
 991static bool gicv3_nolpi;
 992
 993static int __init gicv3_nolpi_cfg(char *buf)
 994{
 995        return strtobool(buf, &gicv3_nolpi);
 996}
 997early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);
 998
 999static int gic_dist_supports_lpis(void)
1000{
1001        return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
1002                !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
1003                !gicv3_nolpi);
1004}
1005
1006static void gic_cpu_init(void)
1007{
1008        void __iomem *rbase;
1009        int i;
1010
1011        /* Register ourselves with the rest of the world */
1012        if (gic_populate_rdist())
1013                return;
1014
1015        gic_enable_redist(true);
1016
1017        WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) &&
1018             !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange),
1019             "Distributor has extended ranges, but CPU%d doesn't\n",
1020             smp_processor_id());
1021
1022        rbase = gic_data_rdist_sgi_base();
1023
1024        /* Configure SGIs/PPIs as non-secure Group-1 */
1025        for (i = 0; i < gic_data.ppi_nr + 16; i += 32)
1026                writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);
1027
1028        gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp);
1029
1030        /* initialise system registers */
1031        gic_cpu_sys_reg_init();
1032}
1033
1034#ifdef CONFIG_SMP
1035
1036#define MPIDR_TO_SGI_RS(mpidr)  (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
1037#define MPIDR_TO_SGI_CLUSTER_ID(mpidr)  ((mpidr) & ~0xFUL)
1038
1039static int gic_starting_cpu(unsigned int cpu)
1040{
1041        gic_cpu_init();
1042
1043        if (gic_dist_supports_lpis())
1044                its_cpu_init();
1045
1046        return 0;
1047}
1048
1049static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
1050                                   unsigned long cluster_id)
1051{
1052        int next_cpu, cpu = *base_cpu;
1053        unsigned long mpidr = cpu_logical_map(cpu);
1054        u16 tlist = 0;
1055
1056        while (cpu < nr_cpu_ids) {
1057                tlist |= 1 << (mpidr & 0xf);
1058
1059                next_cpu = cpumask_next(cpu, mask);
1060                if (next_cpu >= nr_cpu_ids)
1061                        goto out;
1062                cpu = next_cpu;
1063
1064                mpidr = cpu_logical_map(cpu);
1065
1066                if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
1067                        cpu--;
1068                        goto out;
1069                }
1070        }
1071out:
1072        *base_cpu = cpu;
1073        return tlist;
1074}
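
/*
 * Example: four CPUs with MPIDRs 0x000-0x003 share
 * MPIDR_TO_SGI_CLUSTER_ID() == 0, so one call returns tlist == 0xf and a
 * single ICC_SGI1R_EL1 write targets all of them; a CPU from another
 * cluster stops the scan and is picked up by the next iteration of the
 * caller's loop.
 */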
1075
1076#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
1077        (MPIDR_AFFINITY_LEVEL(cluster_id, level) \
1078                << ICC_SGI1R_AFFINITY_## level ##_SHIFT)
1079
1080static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
1081{
1082        u64 val;
1083
1084        val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)     |
1085               MPIDR_TO_SGI_AFFINITY(cluster_id, 2)     |
1086               irq << ICC_SGI1R_SGI_ID_SHIFT            |
1087               MPIDR_TO_SGI_AFFINITY(cluster_id, 1)     |
1088               MPIDR_TO_SGI_RS(cluster_id)              |
1089               tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
1090
1091        pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
1092        gic_write_sgi1r(val);
1093}
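
/*
 * Example encoding: SGI 1 sent to target list 0xf in cluster 0 yields
 * val == (1 << ICC_SGI1R_SGI_ID_SHIFT) | 0xf == 0x0100000f, with the SGI
 * ID in bits [27:24] and the target list in bits [15:0].
 */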
1094
1095static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
1096{
1097        int cpu;
1098
1099        if (WARN_ON(irq >= 16))
1100                return;
1101
1102        /*
1103         * Ensure that stores to Normal memory are visible to the
1104         * other CPUs before issuing the IPI.
1105         */
1106        wmb();
1107
1108        for_each_cpu(cpu, mask) {
1109                u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
1110                u16 tlist;
1111
1112                tlist = gic_compute_target_list(&cpu, mask, cluster_id);
1113                gic_send_sgi(cluster_id, tlist, irq);
1114        }
1115
1116        /* Force the above writes to ICC_SGI1R_EL1 to be executed */
1117        isb();
1118}
1119
1120static void gic_smp_init(void)
1121{
1122        set_smp_cross_call(gic_raise_softirq);
1123        cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
1124                                  "irqchip/arm/gicv3:starting",
1125                                  gic_starting_cpu, NULL);
1126}
1127
1128static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1129                            bool force)
1130{
1131        unsigned int cpu;
1132        u32 offset, index;
1133        void __iomem *reg;
1134        int enabled;
1135        u64 val;
1136
1137        if (force)
1138                cpu = cpumask_first(mask_val);
1139        else
1140                cpu = cpumask_any_and(mask_val, cpu_online_mask);
1141
1142        if (cpu >= nr_cpu_ids)
1143                return -EINVAL;
1144
1145        if (gic_irq_in_rdist(d))
1146                return -EINVAL;
1147
1148        /* If interrupt was enabled, disable it first */
1149        enabled = gic_peek_irq(d, GICD_ISENABLER);
1150        if (enabled)
1151                gic_mask_irq(d);
1152
1153        offset = convert_offset_index(d, GICD_IROUTER, &index);
1154        reg = gic_dist_base(d) + offset + (index * 8);
1155        val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
1156
1157        gic_write_irouter(val, reg);
1158
1159        /*
 1160         * If the interrupt was enabled, enable it again. Otherwise,
1161         * just wait for the distributor to have digested our changes.
1162         */
1163        if (enabled)
1164                gic_unmask_irq(d);
1165        else
1166                gic_dist_wait_for_rwp();
1167
1168        irq_data_update_effective_affinity(d, cpumask_of(cpu));
1169
1170        return IRQ_SET_MASK_OK_DONE;
1171}
1172#else
1173#define gic_set_affinity        NULL
 1174#define gic_smp_init()          do { } while (0)
1175#endif
1176
1177#ifdef CONFIG_CPU_PM
1178static int gic_cpu_pm_notifier(struct notifier_block *self,
1179                               unsigned long cmd, void *v)
1180{
1181        if (cmd == CPU_PM_EXIT) {
1182                if (gic_dist_security_disabled())
1183                        gic_enable_redist(true);
1184                gic_cpu_sys_reg_init();
1185        } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
1186                gic_write_grpen1(0);
1187                gic_enable_redist(false);
1188        }
1189        return NOTIFY_OK;
1190}
1191
1192static struct notifier_block gic_cpu_pm_notifier_block = {
1193        .notifier_call = gic_cpu_pm_notifier,
1194};
1195
1196static void gic_cpu_pm_init(void)
1197{
1198        cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
1199}
1200
1201#else
1202static inline void gic_cpu_pm_init(void) { }
1203#endif /* CONFIG_CPU_PM */
1204
1205static struct irq_chip gic_chip = {
1206        .name                   = "GICv3",
1207        .irq_mask               = gic_mask_irq,
1208        .irq_unmask             = gic_unmask_irq,
1209        .irq_eoi                = gic_eoi_irq,
1210        .irq_set_type           = gic_set_type,
1211        .irq_set_affinity       = gic_set_affinity,
1212        .irq_get_irqchip_state  = gic_irq_get_irqchip_state,
1213        .irq_set_irqchip_state  = gic_irq_set_irqchip_state,
1214        .irq_nmi_setup          = gic_irq_nmi_setup,
1215        .irq_nmi_teardown       = gic_irq_nmi_teardown,
1216        .flags                  = IRQCHIP_SET_TYPE_MASKED |
1217                                  IRQCHIP_SKIP_SET_WAKE |
1218                                  IRQCHIP_MASK_ON_SUSPEND,
1219};
1220
1221static struct irq_chip gic_eoimode1_chip = {
1222        .name                   = "GICv3",
1223        .irq_mask               = gic_eoimode1_mask_irq,
1224        .irq_unmask             = gic_unmask_irq,
1225        .irq_eoi                = gic_eoimode1_eoi_irq,
1226        .irq_set_type           = gic_set_type,
1227        .irq_set_affinity       = gic_set_affinity,
1228        .irq_get_irqchip_state  = gic_irq_get_irqchip_state,
1229        .irq_set_irqchip_state  = gic_irq_set_irqchip_state,
1230        .irq_set_vcpu_affinity  = gic_irq_set_vcpu_affinity,
1231        .irq_nmi_setup          = gic_irq_nmi_setup,
1232        .irq_nmi_teardown       = gic_irq_nmi_teardown,
1233        .flags                  = IRQCHIP_SET_TYPE_MASKED |
1234                                  IRQCHIP_SKIP_SET_WAKE |
1235                                  IRQCHIP_MASK_ON_SUSPEND,
1236};
1237
1238static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
1239                              irq_hw_number_t hw)
1240{
1241        struct irq_chip *chip = &gic_chip;
1242
1243        if (static_branch_likely(&supports_deactivate_key))
1244                chip = &gic_eoimode1_chip;
1245
1246        switch (__get_intid_range(hw)) {
1247        case PPI_RANGE:
1248        case EPPI_RANGE:
1249                irq_set_percpu_devid(irq);
1250                irq_domain_set_info(d, irq, hw, chip, d->host_data,
1251                                    handle_percpu_devid_irq, NULL, NULL);
1252                irq_set_status_flags(irq, IRQ_NOAUTOEN);
1253                break;
1254
1255        case SPI_RANGE:
1256        case ESPI_RANGE:
1257                irq_domain_set_info(d, irq, hw, chip, d->host_data,
1258                                    handle_fasteoi_irq, NULL, NULL);
1259                irq_set_probe(irq);
1260                irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
1261                break;
1262
1263        case LPI_RANGE:
1264                if (!gic_dist_supports_lpis())
1265                        return -EPERM;
1266                irq_domain_set_info(d, irq, hw, chip, d->host_data,
1267                                    handle_fasteoi_irq, NULL, NULL);
1268                break;
1269
1270        default:
1271                return -EPERM;
1272        }
1273
1274        return 0;
1275}
1276
1277#define GIC_IRQ_TYPE_PARTITION  (GIC_IRQ_TYPE_LPI + 1)
1278
1279static int gic_irq_domain_translate(struct irq_domain *d,
1280                                    struct irq_fwspec *fwspec,
1281                                    unsigned long *hwirq,
1282                                    unsigned int *type)
1283{
1284        if (is_of_node(fwspec->fwnode)) {
1285                if (fwspec->param_count < 3)
1286                        return -EINVAL;
1287
1288                switch (fwspec->param[0]) {
1289                case 0:                 /* SPI */
1290                        *hwirq = fwspec->param[1] + 32;
1291                        break;
1292                case 1:                 /* PPI */
1293                        *hwirq = fwspec->param[1] + 16;
1294                        break;
1295                case 2:                 /* ESPI */
1296                        *hwirq = fwspec->param[1] + ESPI_BASE_INTID;
1297                        break;
1298                case 3:                 /* EPPI */
1299                        *hwirq = fwspec->param[1] + EPPI_BASE_INTID;
1300                        break;
1301                case GIC_IRQ_TYPE_LPI:  /* LPI */
1302                        *hwirq = fwspec->param[1];
1303                        break;
1304                case GIC_IRQ_TYPE_PARTITION:
1305                        *hwirq = fwspec->param[1];
1306                        if (fwspec->param[1] >= 16)
1307                                *hwirq += EPPI_BASE_INTID - 16;
1308                        else
1309                                *hwirq += 16;
1310                        break;
1311                default:
1312                        return -EINVAL;
1313                }
1314
1315                *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
1316
1317                /*
1318                 * Make it clear that broken DTs are... broken.
 1319                 * Partitioned PPIs are an unfortunate exception.
1320                 */
1321                WARN_ON(*type == IRQ_TYPE_NONE &&
1322                        fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
1323                return 0;
1324        }
1325
1326        if (is_fwnode_irqchip(fwspec->fwnode)) {
 1327                if (fwspec->param_count != 2)
1328                        return -EINVAL;
1329
1330                *hwirq = fwspec->param[0];
1331                *type = fwspec->param[1];
1332
1333                WARN_ON(*type == IRQ_TYPE_NONE);
1334                return 0;
1335        }
1336
1337        return -EINVAL;
1338}
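
/*
 * Example: the DT interrupt specifier <0 8 4> decodes as SPI 8, i.e.
 * hwirq 8 + 32 == 40, with type IRQ_TYPE_LEVEL_HIGH (4).
 */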
1339
1340static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1341                                unsigned int nr_irqs, void *arg)
1342{
1343        int i, ret;
1344        irq_hw_number_t hwirq;
1345        unsigned int type = IRQ_TYPE_NONE;
1346        struct irq_fwspec *fwspec = arg;
1347
1348        ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
1349        if (ret)
1350                return ret;
1351
1352        for (i = 0; i < nr_irqs; i++) {
1353                ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
1354                if (ret)
1355                        return ret;
1356        }
1357
1358        return 0;
1359}
1360
1361static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
1362                                unsigned int nr_irqs)
1363{
1364        int i;
1365
1366        for (i = 0; i < nr_irqs; i++) {
1367                struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
1368                irq_set_handler(virq + i, NULL);
1369                irq_domain_reset_irq_data(d);
1370        }
1371}
1372
1373static int gic_irq_domain_select(struct irq_domain *d,
1374                                 struct irq_fwspec *fwspec,
1375                                 enum irq_domain_bus_token bus_token)
1376{
1377        /* Not for us */
1378        if (fwspec->fwnode != d->fwnode)
1379                return 0;
1380
1381        /* If this is not DT, then we have a single domain */
1382        if (!is_of_node(fwspec->fwnode))
1383                return 1;
1384
1385        /*
1386         * If this is a PPI and we have a 4th (non-null) parameter,
1387         * then we need to match the partition domain.
1388         */
1389        if (fwspec->param_count >= 4 &&
1390            fwspec->param[0] == 1 && fwspec->param[3] != 0 &&
1391            gic_data.ppi_descs)
1392                return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]);
1393
1394        return d == gic_data.domain;
1395}
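
/*
 * Example: a DT specifier such as <1 9 4 &part> names PPI 9 with a
 * non-null 4th cell, so the match above steers it to the partition
 * domain registered for gic_data.ppi_descs[9] instead of the main GIC
 * domain.
 */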
1396
1397static const struct irq_domain_ops gic_irq_domain_ops = {
1398        .translate = gic_irq_domain_translate,
1399        .alloc = gic_irq_domain_alloc,
1400        .free = gic_irq_domain_free,
1401        .select = gic_irq_domain_select,
1402};
1403
1404static int partition_domain_translate(struct irq_domain *d,
1405                                      struct irq_fwspec *fwspec,
1406                                      unsigned long *hwirq,
1407                                      unsigned int *type)
1408{
1409        struct device_node *np;
1410        int ret;
1411
1412        if (!gic_data.ppi_descs)
1413                return -ENOMEM;
1414
1415        np = of_find_node_by_phandle(fwspec->param[3]);
1416        if (WARN_ON(!np))
1417                return -EINVAL;
1418
1419        ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]],
1420                                     of_node_to_fwnode(np));
1421        if (ret < 0)
1422                return ret;
1423
1424        *hwirq = ret;
1425        *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
1426
1427        return 0;
1428}
1429
1430static const struct irq_domain_ops partition_domain_ops = {
1431        .translate = partition_domain_translate,
1432        .select = gic_irq_domain_select,
1433};
1434
1435static bool gic_enable_quirk_msm8996(void *data)
1436{
1437        struct gic_chip_data *d = data;
1438
1439        d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996;
1440
1441        return true;
1442}
1443
1444static bool gic_enable_quirk_hip06_07(void *data)
1445{
1446        struct gic_chip_data *d = data;
1447
1448        /*
1449         * HIP06 GICD_IIDR clashes with GIC-600 product number (despite
1450         * not being an actual ARM implementation). The saving grace is
1451         * that GIC-600 doesn't have ESPI, so nothing to do in that case.
1452         * HIP07 doesn't even have a proper IIDR, and still pretends to
1453         * have ESPI. In both cases, put them right.
1454         */
1455        if (d->rdists.gicd_typer & GICD_TYPER_ESPI) {
1456                /* Zero both ESPI and the RES0 field next to it... */
1457                d->rdists.gicd_typer &= ~GENMASK(9, 8);
1458                return true;
1459        }
1460
1461        return false;
1462}
1463
1464static const struct gic_quirk gic_quirks[] = {
1465        {
1466                .desc   = "GICv3: Qualcomm MSM8996 broken firmware",
1467                .compatible = "qcom,msm8996-gic-v3",
1468                .init   = gic_enable_quirk_msm8996,
1469        },
1470        {
1471                .desc   = "GICv3: HIP06 erratum 161010803",
1472                .iidr   = 0x0204043b,
1473                .mask   = 0xffffffff,
1474                .init   = gic_enable_quirk_hip06_07,
1475        },
1476        {
1477                .desc   = "GICv3: HIP07 erratum 161010803",
1478                .iidr   = 0x00000000,
1479                .mask   = 0xffffffff,
1480                .init   = gic_enable_quirk_hip06_07,
1481        },
1482        {
1483        }
1484};
1485
1486static void gic_enable_nmi_support(void)
1487{
1488        int i;
1489
1490        if (!gic_prio_masking_enabled())
1491                return;
1492
1493        if (gic_has_group0() && !gic_dist_security_disabled()) {
1494                pr_warn("SCR_EL3.FIQ is cleared, cannot enable use of pseudo-NMIs\n");
1495                return;
1496        }
1497
1498        ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL);
1499        if (!ppi_nmi_refs)
1500                return;
1501
1502        for (i = 0; i < gic_data.ppi_nr; i++)
1503                refcount_set(&ppi_nmi_refs[i], 0);
1504
1505        static_branch_enable(&supports_pseudo_nmis);
1506
1507        if (static_branch_likely(&supports_deactivate_key))
1508                gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI;
1509        else
1510                gic_chip.flags |= IRQCHIP_SUPPORTS_NMI;
1511}
1512
1513static int __init gic_init_bases(void __iomem *dist_base,
1514                                 struct redist_region *rdist_regs,
1515                                 u32 nr_redist_regions,
1516                                 u64 redist_stride,
1517                                 struct fwnode_handle *handle)
1518{
1519        u32 typer;
1520        int err;
1521
1522        if (!is_hyp_mode_available())
1523                static_branch_disable(&supports_deactivate_key);
1524
1525        if (static_branch_likely(&supports_deactivate_key))
1526                pr_info("GIC: Using split EOI/Deactivate mode\n");
1527
1528        gic_data.fwnode = handle;
1529        gic_data.dist_base = dist_base;
1530        gic_data.redist_regions = rdist_regs;
1531        gic_data.nr_redist_regions = nr_redist_regions;
1532        gic_data.redist_stride = redist_stride;
1533
1534        /*
1535         * Find out how many interrupts are supported.
1536         */
1537        typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
1538        gic_data.rdists.gicd_typer = typer;
1539
1540        gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR),
1541                          gic_quirks, &gic_data);
1542
1543        pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
1544        pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);
1545        gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
1546                                                 &gic_data);
1547        irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
1548        gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
1549        gic_data.rdists.has_vlpis = true;
1550        gic_data.rdists.has_direct_lpi = true;
1551
1552        if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
1553                err = -ENOMEM;
1554                goto out_free;
1555        }
1556
1557        gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
1558        pr_info("Distributor has %sRange Selector support\n",
1559                gic_data.has_rss ? "" : "no ");
1560
1561        if (typer & GICD_TYPER_MBIS) {
1562                err = mbi_init(handle, gic_data.domain);
1563                if (err)
1564                        pr_err("Failed to initialize MBIs\n");
1565        }
1566
1567        set_handle_irq(gic_handle_irq);
1568
1569        gic_update_rdist_properties();
1570
1571        gic_smp_init();
1572        gic_dist_init();
1573        gic_cpu_init();
1574        gic_cpu_pm_init();
1575
1576        if (gic_dist_supports_lpis()) {
1577                its_init(handle, &gic_data.rdists, gic_data.domain);
1578                its_cpu_init();
1579        } else {
1580                if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
1581                        gicv2m_init(handle, gic_data.domain);
1582        }
1583
1584        gic_enable_nmi_support();
1585
1586        return 0;
1587
1588out_free:
1589        if (gic_data.domain)
1590                irq_domain_remove(gic_data.domain);
1591        free_percpu(gic_data.rdists.rdist);
1592        return err;
1593}
1594
1595static int __init gic_validate_dist_version(void __iomem *dist_base)
1596{
1597        u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
1598
1599        if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
1600                return -ENODEV;
1601
1602        return 0;
1603}
1604
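    /*
     * A "ppi-partitions" node subdivides a PPI between groups of CPUs.
     * A minimal sketch of the expected layout (node and label names are
     * illustrative, see the arm,gic-v3 DT binding):
     *
     *        ppi-partitions {
     *                part0: interrupt-partition-0 {
     *                        affinity = <&cpu0 &cpu2>;
     *                };
     *        };
     */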
1605/* Create all possible partitions at boot time */
1606static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
1607{
1608        struct device_node *parts_node, *child_part;
1609        int part_idx = 0, i;
1610        int nr_parts;
1611        struct partition_affinity *parts;
1612
1613        parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
1614        if (!parts_node)
1615                return;
1616
1617        gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
1618        if (!gic_data.ppi_descs)
1619                return;
1620
1621        nr_parts = of_get_child_count(parts_node);
1622
1623        if (!nr_parts)
1624                goto out_put_node;
1625
1626        parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
1627        if (WARN_ON(!parts))
1628                goto out_put_node;
1629
1630        for_each_child_of_node(parts_node, child_part) {
1631                struct partition_affinity *part;
1632                int n;
1633
1634                part = &parts[part_idx];
1635
1636                part->partition_id = of_node_to_fwnode(child_part);
1637
1638                pr_info("GIC: PPI partition %pOFn[%d] { ",
1639                        child_part, part_idx);
1640
1641                n = of_property_count_elems_of_size(child_part, "affinity",
1642                                                    sizeof(u32));
1643                WARN_ON(n <= 0);
1644
1645                for (i = 0; i < n; i++) {
1646                        int err, cpu;
1647                        u32 cpu_phandle;
1648                        struct device_node *cpu_node;
1649
1650                        err = of_property_read_u32_index(child_part, "affinity",
1651                                                         i, &cpu_phandle);
1652                        if (WARN_ON(err))
1653                                continue;
1654
1655                        cpu_node = of_find_node_by_phandle(cpu_phandle);
1656                        if (WARN_ON(!cpu_node))
1657                                continue;
1658
1659                        cpu = of_cpu_node_to_id(cpu_node);
1660                        if (WARN_ON(cpu < 0))
1661                                continue;
1662
1663                        pr_cont("%pOF[%d] ", cpu_node, cpu);
1664
1665                        cpumask_set_cpu(cpu, &part->mask);
1666                }
1667
1668                pr_cont("}\n");
1669                part_idx++;
1670        }
1671
1672        for (i = 0; i < gic_data.ppi_nr; i++) {
1673                unsigned int irq;
1674                struct partition_desc *desc;
1675                struct irq_fwspec ppi_fwspec = {
1676                        .fwnode         = gic_data.fwnode,
1677                        .param_count    = 3,
1678                        .param          = {
1679                                [0]     = GIC_IRQ_TYPE_PARTITION,
1680                                [1]     = i,
1681                                [2]     = IRQ_TYPE_NONE,
1682                        },
1683                };
1684
1685                irq = irq_create_fwspec_mapping(&ppi_fwspec);
1686                if (WARN_ON(!irq))
1687                        continue;
1688                desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
1689                                             irq, &partition_domain_ops);
1690                if (WARN_ON(!desc))
1691                        continue;
1692
1693                gic_data.ppi_descs[i] = desc;
1694        }
1695
1696out_put_node:
1697        of_node_put(parts_node);
1698}
1699
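    /*
     * In the DT "reg" property, the GICV region (when present) follows
     * the GICD, the redistributor regions, the GICC and the GICH
     * entries, hence the index computed below.
     */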
1700static void __init gic_of_setup_kvm_info(struct device_node *node)
1701{
1702        int ret;
1703        struct resource r;
1704        u32 gicv_idx;
1705
1706        gic_v3_kvm_info.type = GIC_V3;
1707
1708        gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
1709        if (!gic_v3_kvm_info.maint_irq)
1710                return;
1711
1712        if (of_property_read_u32(node, "#redistributor-regions",
1713                                 &gicv_idx))
1714                gicv_idx = 1;
1715
1716        gicv_idx += 3;  /* Also skip GICD, GICC, GICH */
1717        ret = of_address_to_resource(node, gicv_idx, &r);
1718        if (!ret)
1719                gic_v3_kvm_info.vcpu = r;
1720
1721        gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
1722        gic_set_kvm_info(&gic_v3_kvm_info);
1723}
1724
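    /*
     * DT probe: "reg" entry 0 is the distributor, entries 1..N the
     * redistributor regions (N defaults to 1 when the
     * #redistributor-regions property is absent).
     */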
1725static int __init gic_of_init(struct device_node *node, struct device_node *parent)
1726{
1727        void __iomem *dist_base;
1728        struct redist_region *rdist_regs;
1729        u64 redist_stride;
1730        u32 nr_redist_regions;
1731        int err, i;
1732
1733        dist_base = of_iomap(node, 0);
1734        if (!dist_base) {
1735                pr_err("%pOF: unable to map gic dist registers\n", node);
1736                return -ENXIO;
1737        }
1738
1739        err = gic_validate_dist_version(dist_base);
1740        if (err) {
1741                pr_err("%pOF: no distributor detected, giving up\n", node);
1742                goto out_unmap_dist;
1743        }
1744
1745        if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
1746                nr_redist_regions = 1;
1747
1748        rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs),
1749                             GFP_KERNEL);
1750        if (!rdist_regs) {
1751                err = -ENOMEM;
1752                goto out_unmap_dist;
1753        }
1754
1755        for (i = 0; i < nr_redist_regions; i++) {
1756                struct resource res;
1757                int ret;
1758
1759                ret = of_address_to_resource(node, 1 + i, &res);
1760                rdist_regs[i].redist_base = of_iomap(node, 1 + i);
1761                if (ret || !rdist_regs[i].redist_base) {
1762                        pr_err("%pOF: couldn't map region %d\n", node, i);
1763                        err = -ENODEV;
1764                        goto out_unmap_rdist;
1765                }
1766                rdist_regs[i].phys_base = res.start;
1767        }
1768
1769        if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
1770                redist_stride = 0;
1771
1772        gic_enable_of_quirks(node, gic_quirks, &gic_data);
1773
1774        err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
1775                             redist_stride, &node->fwnode);
1776        if (err)
1777                goto out_unmap_rdist;
1778
1779        gic_populate_ppi_partitions(node);
1780
1781        if (static_branch_likely(&supports_deactivate_key))
1782                gic_of_setup_kvm_info(node);
1783        return 0;
1784
1785out_unmap_rdist:
1786        for (i = 0; i < nr_redist_regions; i++)
1787                if (rdist_regs[i].redist_base)
1788                        iounmap(rdist_regs[i].redist_base);
1789        kfree(rdist_regs);
1790out_unmap_dist:
1791        iounmap(dist_base);
1792        return err;
1793}
1794
1795IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
1796
1797#ifdef CONFIG_ACPI
1798static struct
1799{
1800        void __iomem *dist_base;
1801        struct redist_region *redist_regs;
1802        u32 nr_redist_regions;
1803        bool single_redist;
1804        u32 maint_irq;
1805        int maint_irq_mode;
1806        phys_addr_t vcpu_base;
1807} acpi_data __initdata;
1808
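    /*
     * Stash one redistributor region per call; the static index persists
     * across the MADT walk, so regions are recorded in discovery order.
     */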
1809static void __init
1810gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
1811{
1812        static int count;
1813
1814        acpi_data.redist_regs[count].phys_base = phys_base;
1815        acpi_data.redist_regs[count].redist_base = redist_base;
1816        acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
1817        count++;
1818}
1819
1820static int __init
1821gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
1822                           const unsigned long end)
1823{
1824        struct acpi_madt_generic_redistributor *redist =
1825                        (struct acpi_madt_generic_redistributor *)header;
1826        void __iomem *redist_base;
1827
1828        redist_base = ioremap(redist->base_address, redist->length);
1829        if (!redist_base) {
1830                pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
1831                return -ENOMEM;
1832        }
1833
1834        gic_acpi_register_redist(redist->base_address, redist_base);
1835        return 0;
1836}
1837
1838static int __init
1839gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
1840                         const unsigned long end)
1841{
1842        struct acpi_madt_generic_interrupt *gicc =
1843                                (struct acpi_madt_generic_interrupt *)header;
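            /*
             * A GICv4 redistributor has four 64K frames (RD, SGI, VLPI and
             * a reserved frame) instead of GICv3's two, so size the mapping
             * according to the distributor's architecture revision.
             */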
1844        u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
1845        u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
1846        void __iomem *redist_base;
1847
1848        /* A GICC entry without ACPI_MADT_ENABLED is unusable, so skip it */
1849        if (!(gicc->flags & ACPI_MADT_ENABLED))
1850                return 0;
1851
1852        redist_base = ioremap(gicc->gicr_base_address, size);
1853        if (!redist_base)
1854                return -ENOMEM;
1855
1856        gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
1857        return 0;
1858}
1859
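    /*
     * Map every redistributor region. Firmware describes them either via
     * dedicated GICR subtables or, for single-redistributor layouts, via
     * the per-CPU GICC entries.
     */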
1860static int __init gic_acpi_collect_gicr_base(void)
1861{
1862        acpi_tbl_entry_handler redist_parser;
1863        enum acpi_madt_type type;
1864
1865        if (acpi_data.single_redist) {
1866                type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
1867                redist_parser = gic_acpi_parse_madt_gicc;
1868        } else {
1869                type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
1870                redist_parser = gic_acpi_parse_madt_redist;
1871        }
1872
1873        /* Collect redistributor base addresses in GICR entries */
1874        if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
1875                return 0;
1876
1877        pr_info("No valid GICR entries exist\n");
1878        return -ENODEV;
1879}
1880
1881static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
1882                                      const unsigned long end)
1883{
1884        /* Subtable presence means that redist exists, that's it */
1885        return 0;
1886}
1887
1888static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
1889                                      const unsigned long end)
1890{
1891        struct acpi_madt_generic_interrupt *gicc =
1892                                (struct acpi_madt_generic_interrupt *)header;
1893
1894        /*
1895         * If the GICC entry is enabled and has a valid GICR base address,
1896         * then the GICR base is provided via GICC.
1897         */
1898        if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
1899                return 0;
1900
1901        /*
1902         * It's perfectly valid for firmware to pass a disabled GICC entry;
1903         * don't treat it as an error, skip it instead of failing the probe.
1904         */
1905        if (!(gicc->flags & ACPI_MADT_ENABLED))
1906                return 0;
1907
1908        return -ENODEV;
1909}
1910
1911static int __init gic_acpi_count_gicr_regions(void)
1912{
1913        int count;
1914
1915        /*
1916         * Count how many redistributor regions we have. Mixing redistributor
1917         * descriptions is not allowed: GICR and GICC subtables have to be
1918         * mutually exclusive.
1919         */
1920        count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
1921                                      gic_acpi_match_gicr, 0);
1922        if (count > 0) {
1923                acpi_data.single_redist = false;
1924                return count;
1925        }
1926
1927        count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
1928                                      gic_acpi_match_gicc, 0);
1929        if (count > 0)
1930                acpi_data.single_redist = true;
1931
1932        return count;
1933}
1934
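    /*
     * Match the MADT distributor entry against the GIC version this
     * IRQCHIP_ACPI_DECLARE() entry was registered for.
     */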
1935static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
1936                                           struct acpi_probe_entry *ape)
1937{
1938        struct acpi_madt_generic_distributor *dist;
1939        int count;
1940
1941        dist = (struct acpi_madt_generic_distributor *)header;
1942        if (dist->version != ape->driver_data)
1943                return false;
1944
1945        /* We need to count the GICR regions anyway, the sooner the better */
1946        count = gic_acpi_count_gicr_regions();
1947        if (count <= 0)
1948                return false;
1949
1950        acpi_data.nr_redist_regions = count;
1951        return true;
1952}
1953
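    /*
     * Record the virtual interface details (maintenance IRQ, GICV base)
     * from the first usable GICC entry, then check that every other CPU
     * advertises the same values.
     */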
1954static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header,
1955                                                const unsigned long end)
1956{
1957        struct acpi_madt_generic_interrupt *gicc =
1958                (struct acpi_madt_generic_interrupt *)header;
1959        int maint_irq_mode;
1960        static bool first_madt = true;
1961
1962        /* Skip unusable CPUs */
1963        if (!(gicc->flags & ACPI_MADT_ENABLED))
1964                return 0;
1965
1966        maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
1967                ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
1968
1969        if (first_madt) {
1970                first_madt = false;
1971
1972                acpi_data.maint_irq = gicc->vgic_interrupt;
1973                acpi_data.maint_irq_mode = maint_irq_mode;
1974                acpi_data.vcpu_base = gicc->gicv_base_address;
1975
1976                return 0;
1977        }
1978
1979        /*
1980         * The maintenance interrupt and GICV should be the same for every CPU
1981         */
1982        if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
1983            (acpi_data.maint_irq_mode != maint_irq_mode) ||
1984            (acpi_data.vcpu_base != gicc->gicv_base_address))
1985                return -EINVAL;
1986
1987        return 0;
1988}
1989
1990static bool __init gic_acpi_collect_virt_info(void)
1991{
1992        int count;
1993
1994        count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
1995                                      gic_acpi_parse_virt_madt_gicc, 0);
1996
1997        return (count > 0);
1998}
1999
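    /*
     * Fixed mapping sizes: the MADT only provides base addresses for
     * these regions, not lengths.
     */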
2000#define ACPI_GICV3_DIST_MEM_SIZE        (SZ_64K)
2001#define ACPI_GICV2_VCTRL_MEM_SIZE       (SZ_4K)
2002#define ACPI_GICV2_VCPU_MEM_SIZE        (SZ_8K)
2003
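    /*
     * ACPI counterpart of gic_of_setup_kvm_info(): hand the maintenance
     * IRQ and the (GICv2-compatible) GICV region over to KVM.
     */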
2004static void __init gic_acpi_setup_kvm_info(void)
2005{
2006        int irq;
2007
2008        if (!gic_acpi_collect_virt_info()) {
2009                pr_warn("Unable to get hardware information used for virtualization\n");
2010                return;
2011        }
2012
2013        gic_v3_kvm_info.type = GIC_V3;
2014
2015        irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
2016                                acpi_data.maint_irq_mode,
2017                                ACPI_ACTIVE_HIGH);
2018        if (irq <= 0)
2019                return;
2020
2021        gic_v3_kvm_info.maint_irq = irq;
2022
2023        if (acpi_data.vcpu_base) {
2024                struct resource *vcpu = &gic_v3_kvm_info.vcpu;
2025
2026                vcpu->flags = IORESOURCE_MEM;
2027                vcpu->start = acpi_data.vcpu_base;
2028                vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
2029        }
2030
2031        gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
2032        gic_set_kvm_info(&gic_v3_kvm_info);
2033}
2034
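    /*
     * Main ACPI probe: map the distributor, collect the redistributor
     * regions counted at table-match time, then hand over to
     * gic_init_bases().
     */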
2035static int __init
2036gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
2037{
2038        struct acpi_madt_generic_distributor *dist;
2039        struct fwnode_handle *domain_handle;
2040        size_t size;
2041        int i, err;
2042
2043        /* Get distributor base address */
2044        dist = (struct acpi_madt_generic_distributor *)header;
2045        acpi_data.dist_base = ioremap(dist->base_address,
2046                                      ACPI_GICV3_DIST_MEM_SIZE);
2047        if (!acpi_data.dist_base) {
2048                pr_err("Unable to map GICD registers\n");
2049                return -ENOMEM;
2050        }
2051
2052        err = gic_validate_dist_version(acpi_data.dist_base);
2053        if (err) {
2054                pr_err("No distributor detected at @%p, giving up\n",
2055                       acpi_data.dist_base);
2056                goto out_dist_unmap;
2057        }
2058
2059        size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
2060        acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
2061        if (!acpi_data.redist_regs) {
2062                err = -ENOMEM;
2063                goto out_dist_unmap;
2064        }
2065
2066        err = gic_acpi_collect_gicr_base();
2067        if (err)
2068                goto out_redist_unmap;
2069
2070        domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
2071        if (!domain_handle) {
2072                err = -ENOMEM;
2073                goto out_redist_unmap;
2074        }
2075
2076        err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
2077                             acpi_data.nr_redist_regions, 0, domain_handle);
2078        if (err)
2079                goto out_fwhandle_free;
2080
2081        acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
2082
2083        if (static_branch_likely(&supports_deactivate_key))
2084                gic_acpi_setup_kvm_info();
2085
2086        return 0;
2087
2088out_fwhandle_free:
2089        irq_domain_free_fwnode(domain_handle);
2090out_redist_unmap:
2091        for (i = 0; i < acpi_data.nr_redist_regions; i++)
2092                if (acpi_data.redist_regs[i].redist_base)
2093                        iounmap(acpi_data.redist_regs[i].redist_base);
2094        kfree(acpi_data.redist_regs);
2095out_dist_unmap:
2096        iounmap(acpi_data.dist_base);
2097        return err;
2098}
2099IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
2100                     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
2101                     gic_acpi_init);
2102IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
2103                     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
2104                     gic_acpi_init);
2105IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
2106                     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
2107                     gic_acpi_init);
2108#endif
2109