linux/drivers/irqchip/irq-apple-aic.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright The Asahi Linux Contributors
 *
 * Based on irq-lpc32xx:
 *   Copyright 2015-2016 Vladimir Zapolskiy <vz@mleia.com>
 * Based on irq-bcm2836:
 *   Copyright 2015 Broadcom
 */

/*
 * AIC is a fairly simple interrupt controller with the following features:
 *
 * - 896 level-triggered hardware IRQs
 *   - Single mask bit per IRQ
 *   - Per-IRQ affinity setting
 *   - Automatic masking on event delivery (auto-ack)
 *   - Software triggering (ORed with hw line)
 * - 2 per-CPU IPIs (meant as "self" and "other", but they are
 *   interchangeable if not symmetric)
 * - Automatic prioritization (single event/ack register per CPU, lower IRQs =
 *   higher priority)
 * - Automatic masking on ack
 * - Default "this CPU" register view and explicit per-CPU views
 *
 * In addition, this driver also handles FIQs, as these are routed to the same
 * IRQ vector. These are used for Fast IPIs (TODO), the ARMv8 timer IRQs, and
 * performance counters (TODO).
 *
 * Implementation notes:
 *
 * - This driver creates two IRQ domains, one for HW IRQs and internal FIQs,
 *   and one for IPIs.
 * - Since Linux needs more than 2 IPIs, we implement a software IRQ controller
 *   and funnel all IPIs into one per-CPU IPI (the second "self" IPI is unused).
 * - FIQ hwirq numbers are assigned after true hwirqs, and are per-cpu.
 * - DT bindings use 3-cell form (like GIC):
 *   - <0 nr flags> - hwirq #nr
 *   - <1 nr flags> - FIQ #nr
 *     - nr=0  Physical HV timer
 *     - nr=1  Virtual HV timer
 *     - nr=2  Physical guest timer
 *     - nr=3  Virtual guest timer
 */
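
/*
 * Illustrative only (node name and IRQ number are made up): a consumer of the
 * 3-cell binding above might look roughly like this in the devicetree, with
 * AIC_IRQ/AIC_FIQ coming from dt-bindings/interrupt-controller/apple-aic.h:
 *
 *     some-device {
 *         interrupt-parent = <&aic>;
 *         interrupts = <AIC_IRQ 42 IRQ_TYPE_LEVEL_HIGH>;
 *     };
 */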

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/cpuhotplug.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqchip/arm-vgic-info.h>
#include <linux/irqdomain.h>
#include <linux/limits.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <asm/exception.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

#include <dt-bindings/interrupt-controller/apple-aic.h>

/*
 * AIC registers (MMIO)
 */

#define AIC_INFO                0x0004
#define AIC_INFO_NR_HW          GENMASK(15, 0)

#define AIC_CONFIG              0x0010

#define AIC_WHOAMI              0x2000
#define AIC_EVENT               0x2004
#define AIC_EVENT_TYPE          GENMASK(31, 16)
#define AIC_EVENT_NUM           GENMASK(15, 0)

#define AIC_EVENT_TYPE_HW       1
#define AIC_EVENT_TYPE_IPI      4
#define AIC_EVENT_IPI_OTHER     1
#define AIC_EVENT_IPI_SELF      2
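
/*
 * Decoded examples: an AIC_EVENT read of 0x0001002a means hardware IRQ 42
 * (type 1, number 42), and 0x00040001 means the "other" IPI (type 4, number 1).
 */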

#define AIC_IPI_SEND            0x2008
#define AIC_IPI_ACK             0x200c
#define AIC_IPI_MASK_SET        0x2024
#define AIC_IPI_MASK_CLR        0x2028

#define AIC_IPI_SEND_CPU(cpu)   BIT(cpu)

#define AIC_IPI_OTHER           BIT(0)
#define AIC_IPI_SELF            BIT(31)

#define AIC_TARGET_CPU          0x3000
#define AIC_SW_SET              0x4000
#define AIC_SW_CLR              0x4080
#define AIC_MASK_SET            0x4100
#define AIC_MASK_CLR            0x4180

#define AIC_CPU_IPI_SET(cpu)    (0x5008 + ((cpu) << 7))
#define AIC_CPU_IPI_CLR(cpu)    (0x500c + ((cpu) << 7))
#define AIC_CPU_IPI_MASK_SET(cpu) (0x5024 + ((cpu) << 7))
#define AIC_CPU_IPI_MASK_CLR(cpu) (0x5028 + ((cpu) << 7))
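
/* Per-CPU IPI register blocks are 0x80 bytes apart, hence the (cpu) << 7 stride. */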

#define MASK_REG(x)             (4 * ((x) >> 5))
#define MASK_BIT(x)             BIT((x) & GENMASK(4, 0))
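
/*
 * Example: hwirq 42 lives in the second 32-bit mask word at bit 10, i.e.
 * MASK_REG(42) == 4 (byte offset) and MASK_BIT(42) == BIT(10).
 */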

/*
 * IMP-DEF sysregs that control FIQ sources
 * Note: sysreg-based IPIs are not supported yet.
 */

/* Core PMC control register */
#define SYS_IMP_APL_PMCR0_EL1           sys_reg(3, 1, 15, 0, 0)
#define PMCR0_IMODE                     GENMASK(10, 8)
#define PMCR0_IMODE_OFF                 0
#define PMCR0_IMODE_PMI                 1
#define PMCR0_IMODE_AIC                 2
#define PMCR0_IMODE_HALT                3
#define PMCR0_IMODE_FIQ                 4
#define PMCR0_IACT                      BIT(11)

/* IPI request registers */
#define SYS_IMP_APL_IPI_RR_LOCAL_EL1    sys_reg(3, 5, 15, 0, 0)
#define SYS_IMP_APL_IPI_RR_GLOBAL_EL1   sys_reg(3, 5, 15, 0, 1)
#define IPI_RR_CPU                      GENMASK(7, 0)
/* Cluster only used for the GLOBAL register */
#define IPI_RR_CLUSTER                  GENMASK(23, 16)
#define IPI_RR_TYPE                     GENMASK(29, 28)
#define IPI_RR_IMMEDIATE                0
#define IPI_RR_RETRACT                  1
#define IPI_RR_DEFERRED                 2
#define IPI_RR_NOWAKE                   3

/* IPI status register */
#define SYS_IMP_APL_IPI_SR_EL1          sys_reg(3, 5, 15, 1, 1)
#define IPI_SR_PENDING                  BIT(0)

/* Guest timer FIQ enable register */
#define SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2  sys_reg(3, 5, 15, 1, 3)
#define VM_TMR_FIQ_ENABLE_V             BIT(0)
#define VM_TMR_FIQ_ENABLE_P             BIT(1)

/* Deferred IPI countdown register */
#define SYS_IMP_APL_IPI_CR_EL1          sys_reg(3, 5, 15, 3, 1)

/* Uncore PMC control register */
#define SYS_IMP_APL_UPMCR0_EL1          sys_reg(3, 7, 15, 0, 4)
#define UPMCR0_IMODE                    GENMASK(18, 16)
#define UPMCR0_IMODE_OFF                0
#define UPMCR0_IMODE_AIC                2
#define UPMCR0_IMODE_HALT               3
#define UPMCR0_IMODE_FIQ                4

/* Uncore PMC status register */
#define SYS_IMP_APL_UPMSR_EL1           sys_reg(3, 7, 15, 6, 4)
#define UPMSR_IACT                      BIT(0)

#define AIC_NR_FIQ              4
#define AIC_NR_SWIPI            32

/*
 * FIQ hwirq index definitions: FIQ sources use the DT binding defines
 * directly, except that timers are special. At the irqchip level, the
 * two timer types are represented by their access method: _EL0 registers
 * or _EL02 registers. In the DT binding, the timers are represented
 * by their purpose (HV or guest). This mapping is for when the kernel is
 * running at EL2 (with VHE). When the kernel is running at EL1, the
 * mapping differs and aic_irq_domain_translate() performs the remapping.
 */

#define AIC_TMR_EL0_PHYS        AIC_TMR_HV_PHYS
#define AIC_TMR_EL0_VIRT        AIC_TMR_HV_VIRT
#define AIC_TMR_EL02_PHYS       AIC_TMR_GUEST_PHYS
#define AIC_TMR_EL02_VIRT       AIC_TMR_GUEST_VIRT
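
/*
 * Resulting hwirq layout in the main domain: hardware IRQs occupy
 * [0 .. nr_hw - 1], and the four FIQ sources occupy
 * [nr_hw .. nr_hw + AIC_NR_FIQ - 1] using the indices above.
 */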

struct aic_irq_chip {
        void __iomem *base;
        struct irq_domain *hw_domain;
        struct irq_domain *ipi_domain;
        int nr_hw;
        int ipi_hwirq;
};

static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked);

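/*
 * Per-CPU state for the software (virtual) IPI layer: aic_vipi_flag is the
 * bitmap of pending vIPIs for this CPU, aic_vipi_enable the bitmap of vIPIs
 * that are currently unmasked.
 */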
static DEFINE_PER_CPU(atomic_t, aic_vipi_flag);
static DEFINE_PER_CPU(atomic_t, aic_vipi_enable);

static struct aic_irq_chip *aic_irqc;

static void aic_handle_ipi(struct pt_regs *regs);

static u32 aic_ic_read(struct aic_irq_chip *ic, u32 reg)
{
        return readl_relaxed(ic->base + reg);
}

static void aic_ic_write(struct aic_irq_chip *ic, u32 reg, u32 val)
{
        writel_relaxed(val, ic->base + reg);
}

/*
 * IRQ irqchip
 */

static void aic_irq_mask(struct irq_data *d)
{
        struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);

        aic_ic_write(ic, AIC_MASK_SET + MASK_REG(irqd_to_hwirq(d)),
                     MASK_BIT(irqd_to_hwirq(d)));
}

static void aic_irq_unmask(struct irq_data *d)
{
        struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);

        aic_ic_write(ic, AIC_MASK_CLR + MASK_REG(irqd_to_hwirq(d)),
                     MASK_BIT(irqd_to_hwirq(d)));
}

static void aic_irq_eoi(struct irq_data *d)
{
        /*
         * Reading the interrupt reason automatically acknowledges and masks
         * the IRQ, so we just unmask it here if needed.
         */
        if (!irqd_irq_masked(d))
                aic_irq_unmask(d);
}

static void __exception_irq_entry aic_handle_irq(struct pt_regs *regs)
{
        struct aic_irq_chip *ic = aic_irqc;
        u32 event, type, irq;

        do {
                /*
                 * We cannot use a relaxed read here, as reads from DMA buffers
                 * need to be ordered after the IRQ fires.
                 */
                event = readl(ic->base + AIC_EVENT);
                type = FIELD_GET(AIC_EVENT_TYPE, event);
                irq = FIELD_GET(AIC_EVENT_NUM, event);

                if (type == AIC_EVENT_TYPE_HW)
                        handle_domain_irq(aic_irqc->hw_domain, irq, regs);
                else if (type == AIC_EVENT_TYPE_IPI && irq == 1)
                        aic_handle_ipi(regs);
                else if (event != 0)
                        pr_err_ratelimited("Unknown IRQ event %d, %d\n", type, irq);
        } while (event);

        /*
         * vGIC maintenance interrupts end up here too, so we need to check
         * for them separately. This should never trigger if KVM is working
         * properly, because it will have already taken care of clearing it
         * on guest exit before this handler runs.
         */
        if (is_kernel_in_hyp_mode() && (read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EN) &&
                read_sysreg_s(SYS_ICH_MISR_EL2) != 0) {
                pr_err_ratelimited("vGIC IRQ fired and not handled by KVM, disabling.\n");
                sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
        }
}

static int aic_irq_set_affinity(struct irq_data *d,
                                const struct cpumask *mask_val, bool force)
{
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
        struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
        int cpu;

        if (force)
                cpu = cpumask_first(mask_val);
        else
                cpu = cpumask_any_and(mask_val, cpu_online_mask);

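        /* AIC_TARGET_CPU holds one CPU bitmask per IRQ; route this IRQ to a single CPU. */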
        aic_ic_write(ic, AIC_TARGET_CPU + hwirq * 4, BIT(cpu));
        irq_data_update_effective_affinity(d, cpumask_of(cpu));

        return IRQ_SET_MASK_OK;
}

static int aic_irq_set_type(struct irq_data *d, unsigned int type)
{
        /*
         * Some IRQs (e.g. MSIs) implicitly have edge semantics, and we don't
         * have a way to find out the type of any given IRQ, so just allow both.
         */
        return (type == IRQ_TYPE_LEVEL_HIGH || type == IRQ_TYPE_EDGE_RISING) ? 0 : -EINVAL;
}

static struct irq_chip aic_chip = {
        .name = "AIC",
        .irq_mask = aic_irq_mask,
        .irq_unmask = aic_irq_unmask,
        .irq_eoi = aic_irq_eoi,
        .irq_set_affinity = aic_irq_set_affinity,
        .irq_set_type = aic_irq_set_type,
};

/*
 * FIQ irqchip
 */

static unsigned long aic_fiq_get_idx(struct irq_data *d)
{
        struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);

        return irqd_to_hwirq(d) - ic->nr_hw;
}

static void aic_fiq_set_mask(struct irq_data *d)
{
        /* Only the guest timers have real mask bits, unfortunately. */
        switch (aic_fiq_get_idx(d)) {
        case AIC_TMR_EL02_PHYS:
                sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENABLE_P, 0);
                isb();
                break;
        case AIC_TMR_EL02_VIRT:
                sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENABLE_V, 0);
                isb();
                break;
        default:
                break;
        }
}

static void aic_fiq_clear_mask(struct irq_data *d)
{
        switch (aic_fiq_get_idx(d)) {
        case AIC_TMR_EL02_PHYS:
                sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, 0, VM_TMR_FIQ_ENABLE_P);
                isb();
                break;
        case AIC_TMR_EL02_VIRT:
                sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, 0, VM_TMR_FIQ_ENABLE_V);
                isb();
                break;
        default:
                break;
        }
}

static void aic_fiq_mask(struct irq_data *d)
{
        aic_fiq_set_mask(d);
        __this_cpu_and(aic_fiq_unmasked, ~BIT(aic_fiq_get_idx(d)));
}

static void aic_fiq_unmask(struct irq_data *d)
{
        aic_fiq_clear_mask(d);
        __this_cpu_or(aic_fiq_unmasked, BIT(aic_fiq_get_idx(d)));
}

static void aic_fiq_eoi(struct irq_data *d)
{
        /* We mask to ack (where we can), so we need to unmask at EOI. */
        if (__this_cpu_read(aic_fiq_unmasked) & BIT(aic_fiq_get_idx(d)))
                aic_fiq_clear_mask(d);
}

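/*
 * A timer counts as firing when it is enabled, its interrupt is not masked at
 * the timer itself, and its interrupt status bit is set.
 */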
#define TIMER_FIRING(x)                                                        \
        (((x) & (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_MASK |            \
                 ARCH_TIMER_CTRL_IT_STAT)) ==                                  \
         (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT))

static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
{
        /*
         * It would be really nice if we had a system register that lets us get
         * the FIQ source state without having to peek down into sources...
         * but such a register does not seem to exist.
         *
         * So, we have these potential sources to test for:
         *  - Fast IPIs (not yet used)
         *  - The 4 timers (CNTP, CNTV for each of HV and guest)
         *  - Per-core PMCs (not yet supported)
         *  - Per-cluster uncore PMCs (not yet supported)
         *
         * Since not dealing with any of these results in a FIQ storm,
         * we check for everything here, even things we don't support yet.
         */

        if (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING) {
                pr_err_ratelimited("Fast IPI fired. Acking.\n");
                write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
        }

        if (TIMER_FIRING(read_sysreg(cntp_ctl_el0)))
                handle_domain_irq(aic_irqc->hw_domain,
                                  aic_irqc->nr_hw + AIC_TMR_EL0_PHYS, regs);

        if (TIMER_FIRING(read_sysreg(cntv_ctl_el0)))
                handle_domain_irq(aic_irqc->hw_domain,
                                  aic_irqc->nr_hw + AIC_TMR_EL0_VIRT, regs);

        if (is_kernel_in_hyp_mode()) {
                uint64_t enabled = read_sysreg_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2);

                if ((enabled & VM_TMR_FIQ_ENABLE_P) &&
                    TIMER_FIRING(read_sysreg_s(SYS_CNTP_CTL_EL02)))
                        handle_domain_irq(aic_irqc->hw_domain,
                                          aic_irqc->nr_hw + AIC_TMR_EL02_PHYS, regs);

                if ((enabled & VM_TMR_FIQ_ENABLE_V) &&
                    TIMER_FIRING(read_sysreg_s(SYS_CNTV_CTL_EL02)))
                        handle_domain_irq(aic_irqc->hw_domain,
                                          aic_irqc->nr_hw + AIC_TMR_EL02_VIRT, regs);
        }

        if ((read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & (PMCR0_IMODE | PMCR0_IACT)) ==
                        (FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_FIQ) | PMCR0_IACT)) {
                /*
                 * Not supported yet, let's figure out how to handle this when
                 * we implement these proprietary performance counters. For now,
                 * just mask it and move on.
                 */
                pr_err_ratelimited("PMC FIQ fired. Masking.\n");
                sysreg_clear_set_s(SYS_IMP_APL_PMCR0_EL1, PMCR0_IMODE | PMCR0_IACT,
                                   FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_OFF));
        }

        if (FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ &&
                        (read_sysreg_s(SYS_IMP_APL_UPMSR_EL1) & UPMSR_IACT)) {
                /* Same story with uncore PMCs */
                pr_err_ratelimited("Uncore PMC FIQ fired. Masking.\n");
                sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
                                   FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));
        }
}

static int aic_fiq_set_type(struct irq_data *d, unsigned int type)
{
        return (type == IRQ_TYPE_LEVEL_HIGH) ? 0 : -EINVAL;
}

static struct irq_chip fiq_chip = {
        .name = "AIC-FIQ",
        .irq_mask = aic_fiq_mask,
        .irq_unmask = aic_fiq_unmask,
        .irq_ack = aic_fiq_set_mask,
        .irq_eoi = aic_fiq_eoi,
        .irq_set_type = aic_fiq_set_type,
};

/*
 * Main IRQ domain
 */

static int aic_irq_domain_map(struct irq_domain *id, unsigned int irq,
                              irq_hw_number_t hw)
{
        struct aic_irq_chip *ic = id->host_data;

        if (hw < ic->nr_hw) {
                irq_domain_set_info(id, irq, hw, &aic_chip, id->host_data,
                                    handle_fasteoi_irq, NULL, NULL);
                irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
        } else {
                irq_set_percpu_devid(irq);
                irq_domain_set_info(id, irq, hw, &fiq_chip, id->host_data,
                                    handle_percpu_devid_irq, NULL, NULL);
        }

        return 0;
}

static int aic_irq_domain_translate(struct irq_domain *id,
                                    struct irq_fwspec *fwspec,
                                    unsigned long *hwirq,
                                    unsigned int *type)
{
        struct aic_irq_chip *ic = id->host_data;

        if (fwspec->param_count != 3 || !is_of_node(fwspec->fwnode))
                return -EINVAL;

        switch (fwspec->param[0]) {
        case AIC_IRQ:
                if (fwspec->param[1] >= ic->nr_hw)
                        return -EINVAL;
                *hwirq = fwspec->param[1];
                break;
        case AIC_FIQ:
                if (fwspec->param[1] >= AIC_NR_FIQ)
                        return -EINVAL;
                *hwirq = ic->nr_hw + fwspec->param[1];

                /*
                 * In EL1 the non-redirected registers are the guest's,
                 * not EL2's, so remap the hwirqs to match.
                 */
                if (!is_kernel_in_hyp_mode()) {
                        switch (fwspec->param[1]) {
                        case AIC_TMR_GUEST_PHYS:
                                *hwirq = ic->nr_hw + AIC_TMR_EL0_PHYS;
                                break;
                        case AIC_TMR_GUEST_VIRT:
                                *hwirq = ic->nr_hw + AIC_TMR_EL0_VIRT;
                                break;
                        case AIC_TMR_HV_PHYS:
                        case AIC_TMR_HV_VIRT:
                                return -ENOENT;
                        default:
                                break;
                        }
                }
                break;
        default:
                return -EINVAL;
        }

        *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

        return 0;
}

static int aic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                unsigned int nr_irqs, void *arg)
{
        unsigned int type = IRQ_TYPE_NONE;
        struct irq_fwspec *fwspec = arg;
        irq_hw_number_t hwirq;
        int i, ret;

        ret = aic_irq_domain_translate(domain, fwspec, &hwirq, &type);
        if (ret)
                return ret;

        for (i = 0; i < nr_irqs; i++) {
                ret = aic_irq_domain_map(domain, virq + i, hwirq + i);
                if (ret)
                        return ret;
        }

        return 0;
}

static void aic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
                                unsigned int nr_irqs)
{
        int i;

        for (i = 0; i < nr_irqs; i++) {
                struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

                irq_set_handler(virq + i, NULL);
                irq_domain_reset_irq_data(d);
        }
}

static const struct irq_domain_ops aic_irq_domain_ops = {
        .translate      = aic_irq_domain_translate,
        .alloc          = aic_irq_domain_alloc,
        .free           = aic_irq_domain_free,
};

/*
 * IPI irqchip
 */
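
/*
 * vIPI flow: a sender sets the target bit in the destination CPU's
 * aic_vipi_flag word and, if that vIPI is enabled there, kicks the single
 * hardware "other" IPI towards that CPU. The receiver acks the hardware IPI,
 * clears the flagged-and-enabled bits, and dispatches one software IPI per
 * set bit (see aic_ipi_send_mask() and aic_handle_ipi()).
 */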

static void aic_ipi_mask(struct irq_data *d)
{
        u32 irq_bit = BIT(irqd_to_hwirq(d));

        /* No specific ordering requirements needed here. */
        atomic_andnot(irq_bit, this_cpu_ptr(&aic_vipi_enable));
}

static void aic_ipi_unmask(struct irq_data *d)
{
        struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
        u32 irq_bit = BIT(irqd_to_hwirq(d));

        atomic_or(irq_bit, this_cpu_ptr(&aic_vipi_enable));

        /*
         * The atomic_or() above must complete before the atomic_read()
         * below to avoid racing aic_ipi_send_mask().
         */
        smp_mb__after_atomic();

        /*
         * If a pending vIPI was unmasked, raise a HW IPI to ourselves.
         * No barriers needed here since this is a self-IPI.
         */
        if (atomic_read(this_cpu_ptr(&aic_vipi_flag)) & irq_bit)
                aic_ic_write(ic, AIC_IPI_SEND, AIC_IPI_SEND_CPU(smp_processor_id()));
}

static void aic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
        struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
        u32 irq_bit = BIT(irqd_to_hwirq(d));
        u32 send = 0;
        int cpu;
        unsigned long pending;

        for_each_cpu(cpu, mask) {
                /*
                 * This sequence is the mirror of the one in aic_ipi_unmask();
                 * see the comment there. Additionally, release semantics
                 * ensure that the vIPI flag set is ordered after any shared
                 * memory accesses that precede it. This therefore also pairs
                 * with the atomic_fetch_andnot in aic_handle_ipi().
                 */
                pending = atomic_fetch_or_release(irq_bit, per_cpu_ptr(&aic_vipi_flag, cpu));

                /*
                 * The atomic_fetch_or_release() above must complete before the
                 * atomic_read() below to avoid racing aic_ipi_unmask().
                 */
                smp_mb__after_atomic();

                if (!(pending & irq_bit) &&
                    (atomic_read(per_cpu_ptr(&aic_vipi_enable, cpu)) & irq_bit))
                        send |= AIC_IPI_SEND_CPU(cpu);
        }

        /*
         * The flag writes must complete before the physical IPI is issued
         * to another CPU. This is ensured by the smp_mb__after_atomic()
         * barrier and the control dependency on the result of the
         * atomic_read() above, which are themselves already ordered after
         * the vIPI flag write.
         */
        if (send)
                aic_ic_write(ic, AIC_IPI_SEND, send);
}

static struct irq_chip ipi_chip = {
        .name = "AIC-IPI",
        .irq_mask = aic_ipi_mask,
        .irq_unmask = aic_ipi_unmask,
        .ipi_send_mask = aic_ipi_send_mask,
};

/*
 * IPI IRQ domain
 */

static void aic_handle_ipi(struct pt_regs *regs)
{
        int i;
        unsigned long enabled, firing;

        /*
         * Ack the IPI. We need to order this after the AIC event read, but
         * that is enforced by normal MMIO ordering guarantees.
         */
        aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);

        /*
         * The mask read does not need to be ordered. Only we can change
         * our own mask anyway, so no races are possible here, as long as
         * we are properly in the interrupt handler (which is covered by
         * the barrier that is part of the top-level AIC handler's readl()).
         */
        enabled = atomic_read(this_cpu_ptr(&aic_vipi_enable));

        /*
         * Clear the IPIs we are about to handle. This pairs with the
         * atomic_fetch_or_release() in aic_ipi_send_mask(), and needs to be
         * ordered after the aic_ic_write() above (to avoid dropping vIPIs) and
         * before IPI handling code (to avoid races handling vIPIs before they
         * are signaled). The former is taken care of by the release semantics
         * of the write portion, while the latter is taken care of by the
         * acquire semantics of the read portion.
         */
        firing = atomic_fetch_andnot(enabled, this_cpu_ptr(&aic_vipi_flag)) & enabled;

        for_each_set_bit(i, &firing, AIC_NR_SWIPI)
                handle_domain_irq(aic_irqc->ipi_domain, i, regs);

        /*
         * No ordering needed here; at worst this just changes the timing of
         * when the next IPI will be delivered.
         */
        aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
}

static int aic_ipi_alloc(struct irq_domain *d, unsigned int virq,
                         unsigned int nr_irqs, void *args)
{
        int i;

        for (i = 0; i < nr_irqs; i++) {
                irq_set_percpu_devid(virq + i);
                irq_domain_set_info(d, virq + i, i, &ipi_chip, d->host_data,
                                    handle_percpu_devid_irq, NULL, NULL);
        }

        return 0;
}

static void aic_ipi_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs)
{
        /* Not freeing IPIs */
}

static const struct irq_domain_ops aic_ipi_domain_ops = {
        .alloc = aic_ipi_alloc,
        .free = aic_ipi_free,
};

static int aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
{
        struct irq_domain *ipi_domain;
        int base_ipi;

        ipi_domain = irq_domain_create_linear(irqc->hw_domain->fwnode, AIC_NR_SWIPI,
                                              &aic_ipi_domain_ops, irqc);
        if (WARN_ON(!ipi_domain))
                return -ENODEV;

        ipi_domain->flags |= IRQ_DOMAIN_FLAG_IPI_SINGLE;
        irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);

        base_ipi = __irq_domain_alloc_irqs(ipi_domain, -1, AIC_NR_SWIPI,
                                           NUMA_NO_NODE, NULL, false, NULL);

        if (WARN_ON(base_ipi <= 0)) {
                irq_domain_remove(ipi_domain);
                return -ENODEV;
        }

        set_smp_ipi_range(base_ipi, AIC_NR_SWIPI);

        irqc->ipi_domain = ipi_domain;

        return 0;
}

static int aic_init_cpu(unsigned int cpu)
{
        /* Mask all hard-wired per-CPU IRQ/FIQ sources */

        /* Pending Fast IPI FIQs */
        write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);

        /* Timer FIQs */
        sysreg_clear_set(cntp_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);
        sysreg_clear_set(cntv_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);

        /* EL2-only (VHE mode) IRQ sources */
        if (is_kernel_in_hyp_mode()) {
                /* Guest timers */
                sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2,
                                   VM_TMR_FIQ_ENABLE_V | VM_TMR_FIQ_ENABLE_P, 0);

                /* vGIC maintenance IRQ */
                sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
        }

        /* PMC FIQ */
        sysreg_clear_set_s(SYS_IMP_APL_PMCR0_EL1, PMCR0_IMODE | PMCR0_IACT,
                           FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_OFF));

        /* Uncore PMC FIQ */
        sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
                           FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));

        /* Commit all of the above */
        isb();

        /*
         * Make sure the kernel's idea of logical CPU order is the same as AIC's
         * If we ever end up with a mismatch here, we will have to introduce
         * a mapping table similar to what other irqchip drivers do.
         */
        WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id());

        /*
         * Always keep IPIs unmasked at the hardware level (except auto-masking
         * by AIC during processing). We manage masks at the vIPI level.
         */
        aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER);
        aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
        aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);

        /* Initialize the local mask state */
        __this_cpu_write(aic_fiq_unmasked, 0);

        return 0;
}

static struct gic_kvm_info vgic_info __initdata = {
        .type                   = GIC_V3,
        .no_maint_irq_mask      = true,
        .no_hw_deactivation     = true,
};

static int __init aic_of_ic_init(struct device_node *node, struct device_node *parent)
{
        int i;
        void __iomem *regs;
        u32 info;
        struct aic_irq_chip *irqc;

        regs = of_iomap(node, 0);
        if (WARN_ON(!regs))
                return -EIO;

        irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
        if (!irqc)
                return -ENOMEM;

        aic_irqc = irqc;
        irqc->base = regs;

        info = aic_ic_read(irqc, AIC_INFO);
        irqc->nr_hw = FIELD_GET(AIC_INFO_NR_HW, info);

        irqc->hw_domain = irq_domain_create_linear(of_node_to_fwnode(node),
                                                   irqc->nr_hw + AIC_NR_FIQ,
                                                   &aic_irq_domain_ops, irqc);
        if (WARN_ON(!irqc->hw_domain)) {
                iounmap(irqc->base);
                kfree(irqc);
                return -ENODEV;
        }

        irq_domain_update_bus_token(irqc->hw_domain, DOMAIN_BUS_WIRED);

        if (aic_init_smp(irqc, node)) {
                irq_domain_remove(irqc->hw_domain);
                iounmap(irqc->base);
                kfree(irqc);
                return -ENODEV;
        }

        set_handle_irq(aic_handle_irq);
        set_handle_fiq(aic_handle_fiq);

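        /*
         * Start from a clean slate: mask all hardware IRQs, clear any
         * software-triggered state, and route every IRQ to CPU 0.
         */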
        for (i = 0; i < BITS_TO_U32(irqc->nr_hw); i++)
                aic_ic_write(irqc, AIC_MASK_SET + i * 4, U32_MAX);
        for (i = 0; i < BITS_TO_U32(irqc->nr_hw); i++)
                aic_ic_write(irqc, AIC_SW_CLR + i * 4, U32_MAX);
        for (i = 0; i < irqc->nr_hw; i++)
                aic_ic_write(irqc, AIC_TARGET_CPU + i * 4, 1);

        if (!is_kernel_in_hyp_mode())
                pr_info("Kernel running in EL1, mapping interrupts\n");

        cpuhp_setup_state(CPUHP_AP_IRQ_APPLE_AIC_STARTING,
                          "irqchip/apple-aic/ipi:starting",
                          aic_init_cpu, NULL);

        vgic_set_kvm_info(&vgic_info);

        pr_info("Initialized with %d IRQs, %d FIQs, %d vIPIs\n",
                irqc->nr_hw, AIC_NR_FIQ, AIC_NR_SWIPI);

        return 0;
}

IRQCHIP_DECLARE(apple_m1_aic, "apple,aic", aic_of_ic_init);