linux/drivers/irqchip/irq-bcm6345-l1.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom BCM6345 style Level 1 interrupt controller driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 * Copyright 2015 Simon Arlott
 *
 * This is based on the BCM7038 (which supports SMP) but with a single
 * enable register instead of separate mask/set/clear registers.
 *
 * The BCM3380 has a similar mask/status register layout, but each pair
 * of words is at separate locations (and SMP is not supported).
 *
 * ENABLE/STATUS words are packed next to each other for each CPU:
 *
 * BCM6368:
 *   0x1000_0020: CPU0_W0_ENABLE
 *   0x1000_0024: CPU0_W1_ENABLE
 *   0x1000_0028: CPU0_W0_STATUS                IRQs 32-63
 *   0x1000_002c: CPU0_W1_STATUS                IRQs 0-31
 *   0x1000_0030: CPU1_W0_ENABLE
 *   0x1000_0034: CPU1_W1_ENABLE
 *   0x1000_0038: CPU1_W0_STATUS                IRQs 32-63
 *   0x1000_003c: CPU1_W1_STATUS                IRQs 0-31
 *
 * BCM63168:
 *   0x1000_0020: CPU0_W0_ENABLE
 *   0x1000_0024: CPU0_W1_ENABLE
 *   0x1000_0028: CPU0_W2_ENABLE
 *   0x1000_002c: CPU0_W3_ENABLE
 *   0x1000_0030: CPU0_W0_STATUS        IRQs 96-127
 *   0x1000_0034: CPU0_W1_STATUS        IRQs 64-95
 *   0x1000_0038: CPU0_W2_STATUS        IRQs 32-63
 *   0x1000_003c: CPU0_W3_STATUS        IRQs 0-31
 *   0x1000_0040: CPU1_W0_ENABLE
 *   0x1000_0044: CPU1_W1_ENABLE
 *   0x1000_0048: CPU1_W2_ENABLE
 *   0x1000_004c: CPU1_W3_ENABLE
 *   0x1000_0050: CPU1_W0_STATUS        IRQs 96-127
 *   0x1000_0054: CPU1_W1_STATUS        IRQs 64-95
 *   0x1000_0058: CPU1_W2_STATUS        IRQs 32-63
 *   0x1000_005c: CPU1_W3_STATUS        IRQs 0-31
 *
 * IRQs are numbered in CPU native endian order
 * (which is big-endian in these examples)
 */
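
/*
 * A matching device tree node provides one "reg" window and one parent
 * interrupt per CPU. A rough sketch (illustrative labels and values only,
 * based on the BCM6368 layout above; see the brcm,bcm6345-l1-intc binding
 * document for the authoritative example):
 *
 *   periph_intc: interrupt-controller@10000020 {
 *           compatible = "brcm,bcm6345-l1-intc";
 *           reg = <0x10000020 0x10>,
 *                 <0x10000030 0x10>;
 *
 *           interrupt-controller;
 *           #interrupt-cells = <1>;
 *
 *           interrupt-parent = <&cpu_intc>;
 *           interrupts = <2>, <3>;
 *   };
 */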

#define pr_fmt(fmt)     KBUILD_MODNAME  ": " fmt

#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>

#define IRQS_PER_WORD           32
#define REG_BYTES_PER_IRQ_WORD  (sizeof(u32) * 2)

struct bcm6345_l1_cpu;

struct bcm6345_l1_chip {
        raw_spinlock_t          lock;
        unsigned int            n_words;
        struct irq_domain       *domain;
        struct cpumask          cpumask;
        struct bcm6345_l1_cpu   *cpus[NR_CPUS];
};

struct bcm6345_l1_cpu {
        void __iomem            *map_base;
        unsigned int            parent_irq;
        u32                     enable_cache[];
};

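/*
 * Per-CPU register block layout: n_words ENABLE words followed by n_words
 * STATUS words. On big-endian systems the word order within each group is
 * reversed, so with n_words = 2 (BCM6368) word 0 uses the ENABLE register
 * at offset 0x4 and the STATUS register at offset 0xc, while word 1 uses
 * offsets 0x0 and 0x8.
 */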
static inline unsigned int reg_enable(struct bcm6345_l1_chip *intc,
                                      unsigned int word)
{
#ifdef __BIG_ENDIAN
        return (1 * intc->n_words - word - 1) * sizeof(u32);
#else
        return (0 * intc->n_words + word) * sizeof(u32);
#endif
}

static inline unsigned int reg_status(struct bcm6345_l1_chip *intc,
                                      unsigned int word)
{
#ifdef __BIG_ENDIAN
        return (2 * intc->n_words - word - 1) * sizeof(u32);
#else
        return (1 * intc->n_words + word) * sizeof(u32);
#endif
}

static inline unsigned int cpu_for_irq(struct bcm6345_l1_chip *intc,
                                       struct irq_data *d)
{
        return cpumask_first_and(&intc->cpumask, irq_data_get_affinity_mask(d));
}

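/*
 * Chained handler for the per-CPU parent interrupt. Each CPU reads only
 * its own STATUS words and services only the IRQs it has enabled locally,
 * so no lock is taken here.
 */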
static void bcm6345_l1_irq_handle(struct irq_desc *desc)
{
        struct bcm6345_l1_chip *intc = irq_desc_get_handler_data(desc);
        struct bcm6345_l1_cpu *cpu;
        struct irq_chip *chip = irq_desc_get_chip(desc);
        unsigned int idx;

#ifdef CONFIG_SMP
        cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
#else
        cpu = intc->cpus[0];
#endif

        chained_irq_enter(chip, desc);

        for (idx = 0; idx < intc->n_words; idx++) {
                int base = idx * IRQS_PER_WORD;
                unsigned long pending;
                irq_hw_number_t hwirq;

                pending = __raw_readl(cpu->map_base + reg_status(intc, idx));
                pending &= __raw_readl(cpu->map_base + reg_enable(intc, idx));

                for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
                        if (generic_handle_domain_irq(intc->domain, base + hwirq))
                                spurious_interrupt();
                }
        }

        chained_irq_exit(chip, desc);
}

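/*
 * The hardware has a single per-CPU ENABLE register per word rather than
 * separate set/clear registers, so the enable state is mirrored in
 * enable_cache[] and updated with a read-modify-write of that cache.
 * Callers of the __ helpers must hold intc->lock.
 */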
static inline void __bcm6345_l1_unmask(struct irq_data *d)
{
        struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
        u32 word = d->hwirq / IRQS_PER_WORD;
        u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
        unsigned int cpu_idx = cpu_for_irq(intc, d);

        intc->cpus[cpu_idx]->enable_cache[word] |= mask;
        __raw_writel(intc->cpus[cpu_idx]->enable_cache[word],
                intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));
}

static inline void __bcm6345_l1_mask(struct irq_data *d)
{
        struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
        u32 word = d->hwirq / IRQS_PER_WORD;
        u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
        unsigned int cpu_idx = cpu_for_irq(intc, d);

        intc->cpus[cpu_idx]->enable_cache[word] &= ~mask;
        __raw_writel(intc->cpus[cpu_idx]->enable_cache[word],
                intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));
}

static void bcm6345_l1_unmask(struct irq_data *d)
{
        struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
        unsigned long flags;

        raw_spin_lock_irqsave(&intc->lock, flags);
        __bcm6345_l1_unmask(d);
        raw_spin_unlock_irqrestore(&intc->lock, flags);
}

static void bcm6345_l1_mask(struct irq_data *d)
{
        struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
        unsigned long flags;

        raw_spin_lock_irqsave(&intc->lock, flags);
        __bcm6345_l1_mask(d);
        raw_spin_unlock_irqrestore(&intc->lock, flags);
}

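/*
 * An interrupt is only ever enabled on one CPU at a time, so changing the
 * affinity means masking it on the old CPU and, if it was enabled,
 * re-enabling it on the new one. Only online CPUs that have a register
 * block are valid targets.
 */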
static int bcm6345_l1_set_affinity(struct irq_data *d,
                                   const struct cpumask *dest,
                                   bool force)
{
        struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
        u32 word = d->hwirq / IRQS_PER_WORD;
        u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
        unsigned int old_cpu = cpu_for_irq(intc, d);
        unsigned int new_cpu;
        struct cpumask valid;
        unsigned long flags;
        bool enabled;

        if (!cpumask_and(&valid, &intc->cpumask, dest))
                return -EINVAL;

        new_cpu = cpumask_any_and(&valid, cpu_online_mask);
        if (new_cpu >= nr_cpu_ids)
                return -EINVAL;

        dest = cpumask_of(new_cpu);

        raw_spin_lock_irqsave(&intc->lock, flags);
        if (old_cpu != new_cpu) {
                enabled = intc->cpus[old_cpu]->enable_cache[word] & mask;
                if (enabled)
                        __bcm6345_l1_mask(d);
                cpumask_copy(irq_data_get_affinity_mask(d), dest);
                if (enabled)
                        __bcm6345_l1_unmask(d);
        } else {
                cpumask_copy(irq_data_get_affinity_mask(d), dest);
        }
        raw_spin_unlock_irqrestore(&intc->lock, flags);

        irq_data_update_effective_affinity(d, cpumask_of(new_cpu));

        return IRQ_SET_MASK_OK_NOCOPY;
}

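/*
 * Set up the register block and parent interrupt for one CPU: the same DT
 * index selects both the "reg" entry and the "interrupts" entry, the
 * number of 32-bit words is derived from the size of the register block,
 * and all interrupts start out masked.
 */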
static int __init bcm6345_l1_init_one(struct device_node *dn,
                                      unsigned int idx,
                                      struct bcm6345_l1_chip *intc)
{
        struct resource res;
        resource_size_t sz;
        struct bcm6345_l1_cpu *cpu;
        unsigned int i, n_words;

        if (of_address_to_resource(dn, idx, &res))
                return -EINVAL;
        sz = resource_size(&res);
        n_words = sz / REG_BYTES_PER_IRQ_WORD;

        if (!intc->n_words)
                intc->n_words = n_words;
        else if (intc->n_words != n_words)
                return -EINVAL;

        cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
                                        GFP_KERNEL);
        if (!cpu)
                return -ENOMEM;

        cpu->map_base = ioremap(res.start, sz);
        if (!cpu->map_base)
                return -ENOMEM;

        for (i = 0; i < n_words; i++) {
                cpu->enable_cache[i] = 0;
                __raw_writel(0, cpu->map_base + reg_enable(intc, i));
        }

        cpu->parent_irq = irq_of_parse_and_map(dn, idx);
        if (!cpu->parent_irq) {
                pr_err("failed to map parent interrupt %u\n", idx);
                return -EINVAL;
        }
        irq_set_chained_handler_and_data(cpu->parent_irq,
                                                bcm6345_l1_irq_handle, intc);

        return 0;
}

static struct irq_chip bcm6345_l1_irq_chip = {
        .name                   = "bcm6345-l1",
        .irq_mask               = bcm6345_l1_mask,
        .irq_unmask             = bcm6345_l1_unmask,
        .irq_set_affinity       = bcm6345_l1_set_affinity,
};

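/*
 * Newly mapped interrupts use handle_percpu_irq and are marked
 * single-target: each one is delivered to exactly one CPU, chosen through
 * the affinity mask, never broadcast to all CPUs.
 */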
static int bcm6345_l1_map(struct irq_domain *d, unsigned int virq,
                          irq_hw_number_t hw_irq)
{
        irq_set_chip_and_handler(virq,
                &bcm6345_l1_irq_chip, handle_percpu_irq);
        irq_set_chip_data(virq, d->host_data);
        irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
        return 0;
}

static const struct irq_domain_ops bcm6345_l1_domain_ops = {
        .xlate                  = irq_domain_xlate_onecell,
        .map                    = bcm6345_l1_map,
};

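/*
 * Probe one register block and one parent interrupt per possible CPU; the
 * controller is registered as long as at least one CPU was set up
 * successfully.
 */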
static int __init bcm6345_l1_of_init(struct device_node *dn,
                              struct device_node *parent)
{
        struct bcm6345_l1_chip *intc;
        unsigned int idx;
        int ret;

        intc = kzalloc(sizeof(*intc), GFP_KERNEL);
        if (!intc)
                return -ENOMEM;

        for_each_possible_cpu(idx) {
                ret = bcm6345_l1_init_one(dn, idx, intc);
                if (ret)
                        pr_err("failed to init intc L1 for cpu %d: %d\n",
                                idx, ret);
                else
                        cpumask_set_cpu(idx, &intc->cpumask);
        }

        if (!cpumask_weight(&intc->cpumask)) {
                ret = -ENODEV;
                goto out_free;
        }

        raw_spin_lock_init(&intc->lock);

        intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words,
                                             &bcm6345_l1_domain_ops,
                                             intc);
        if (!intc->domain) {
                ret = -ENOMEM;
                goto out_unmap;
        }

        pr_info("registered BCM6345 L1 intc (IRQs: %d)\n",
                        IRQS_PER_WORD * intc->n_words);
        for_each_cpu(idx, &intc->cpumask) {
                struct bcm6345_l1_cpu *cpu = intc->cpus[idx];

                pr_info("  CPU%u at MMIO 0x%p (irq = %d)\n", idx,
                                cpu->map_base, cpu->parent_irq);
        }

        return 0;

out_unmap:
        for_each_possible_cpu(idx) {
                struct bcm6345_l1_cpu *cpu = intc->cpus[idx];

                if (cpu) {
                        if (cpu->map_base)
                                iounmap(cpu->map_base);
                        kfree(cpu);
                }
        }
out_free:
        kfree(intc);
        return ret;
}

IRQCHIP_DECLARE(bcm6345_l1, "brcm,bcm6345-l1-intc", bcm6345_l1_of_init);