linux/arch/mips/lantiq/irq.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 John Crispin <john@phrozen.org>
 * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
 */

#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/irqdomain.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <asm/bootinfo.h>
#include <asm/irq_cpu.h>

#include <lantiq_soc.h>
#include <irq.h>

/* register definitions - internal irqs */
#define LTQ_ICU_ISR             0x0000
#define LTQ_ICU_IER             0x0008
#define LTQ_ICU_IOSR            0x0010
#define LTQ_ICU_IRSR            0x0018
#define LTQ_ICU_IMR             0x0020

#define LTQ_ICU_IM_SIZE         0x28

/* register definitions - external irqs */
#define LTQ_EIU_EXIN_C          0x0000
#define LTQ_EIU_EXIN_INIC       0x0004
#define LTQ_EIU_EXIN_INC        0x0008
#define LTQ_EIU_EXIN_INEN       0x000C

/* number of external interrupts */
#define MAX_EIU                 6

/* the performance counter */
#define LTQ_PERF_IRQ            (INT_NUM_IM4_IRL0 + 31)

/*
 * irqs generated by devices attached to the EBU need to be acked in
 * a special manner
 */
#define LTQ_ICU_EBU_IRQ         22

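/*
 * each VPE has its own ICU register block; within a block every
 * interrupt module (IM) occupies LTQ_ICU_IM_SIZE bytes, so a register
 * of module m lives at m * LTQ_ICU_IM_SIZE + register offset
 */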
#define ltq_icu_w32(vpe, m, x, y)       \
        ltq_w32((x), ltq_icu_membase[vpe] + m*LTQ_ICU_IM_SIZE + (y))

#define ltq_icu_r32(vpe, m, x)          \
        ltq_r32(ltq_icu_membase[vpe] + m*LTQ_ICU_IM_SIZE + (x))

#define ltq_eiu_w32(x, y)       ltq_w32((x), ltq_eiu_membase + (y))
#define ltq_eiu_r32(x)          ltq_r32(ltq_eiu_membase + (x))

/* we have a cascade of 8 irqs */
#define MIPS_CPU_IRQ_CASCADE            8

static int exin_avail;
static u32 ltq_eiu_irq[MAX_EIU];
static void __iomem *ltq_icu_membase[NR_CPUS];
static void __iomem *ltq_eiu_membase;
static struct irq_domain *ltq_domain;
static DEFINE_SPINLOCK(ltq_eiu_lock);
static DEFINE_RAW_SPINLOCK(ltq_icu_lock);
static int ltq_perfcount_irq;

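/* map an EIU pin number to its ICU irq, or -1 if the pin does not exist */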
int ltq_eiu_get_irq(int exin)
{
        if (exin < exin_avail)
                return ltq_eiu_irq[exin];
        return -1;
}

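/* mask an irq by clearing its enable bit in the IER on every present VPE */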
void ltq_disable_irq(struct irq_data *d)
{
        unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
        unsigned long im = offset / INT_NUM_IM_OFFSET;
        unsigned long flags;
        int vpe;

        offset %= INT_NUM_IM_OFFSET;

        raw_spin_lock_irqsave(&ltq_icu_lock, flags);
        for_each_present_cpu(vpe) {
                ltq_icu_w32(vpe, im,
                            ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset),
                            LTQ_ICU_IER);
        }
        raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
}

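/* mask an irq and clear any pending state in the ISR on every present VPE */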
void ltq_mask_and_ack_irq(struct irq_data *d)
{
        unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
        unsigned long im = offset / INT_NUM_IM_OFFSET;
        unsigned long flags;
        int vpe;

        offset %= INT_NUM_IM_OFFSET;

        raw_spin_lock_irqsave(&ltq_icu_lock, flags);
        for_each_present_cpu(vpe) {
                ltq_icu_w32(vpe, im,
                            ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset),
                            LTQ_ICU_IER);
                ltq_icu_w32(vpe, im, BIT(offset), LTQ_ICU_ISR);
        }
        raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
}

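/* ack an irq by writing its status bit back to the ISR on every present VPE */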
static void ltq_ack_irq(struct irq_data *d)
{
        unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
        unsigned long im = offset / INT_NUM_IM_OFFSET;
        unsigned long flags;
        int vpe;

        offset %= INT_NUM_IM_OFFSET;

        raw_spin_lock_irqsave(&ltq_icu_lock, flags);
        for_each_present_cpu(vpe) {
                ltq_icu_w32(vpe, im, BIT(offset), LTQ_ICU_ISR);
        }
        raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
}

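/*
 * unmask an irq on a single VPE only - the first one in the effective
 * affinity mask - so the interrupt is not raised on several VPEs at once
 */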
void ltq_enable_irq(struct irq_data *d)
{
        unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
        unsigned long im = offset / INT_NUM_IM_OFFSET;
        unsigned long flags;
        int vpe;

        offset %= INT_NUM_IM_OFFSET;

        vpe = cpumask_first(irq_data_get_effective_affinity_mask(d));

        /* this shouldn't even be possible, except maybe during CPU hotplug */
        if (unlikely(vpe >= nr_cpu_ids))
                vpe = smp_processor_id();

        raw_spin_lock_irqsave(&ltq_icu_lock, flags);

        ltq_icu_w32(vpe, im, ltq_icu_r32(vpe, im, LTQ_ICU_IER) | BIT(offset),
                    LTQ_ICU_IER);

        raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
}

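/*
 * program the trigger mode of an external irq into its 3-bit field
 * (4-bit stride) in EXIN_C: 1/2/3 = rising/falling/both edges,
 * 5/6 = high/low level; edge modes also switch to the edge flow handler
 */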
static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
{
        int i;
        unsigned long flags;

        for (i = 0; i < exin_avail; i++) {
                if (d->hwirq == ltq_eiu_irq[i]) {
                        int val = 0;
                        int edge = 0;

                        switch (type) {
                        case IRQF_TRIGGER_NONE:
                                break;
                        case IRQF_TRIGGER_RISING:
                                val = 1;
                                edge = 1;
                                break;
                        case IRQF_TRIGGER_FALLING:
                                val = 2;
                                edge = 1;
                                break;
                        case IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING:
                                val = 3;
                                edge = 1;
                                break;
                        case IRQF_TRIGGER_HIGH:
                                val = 5;
                                break;
                        case IRQF_TRIGGER_LOW:
                                val = 6;
                                break;
                        default:
                                pr_err("invalid type %d for irq %ld\n",
                                        type, d->hwirq);
                                return -EINVAL;
                        }

                        if (edge)
                                irq_set_handler(d->hwirq, handle_edge_irq);

                        spin_lock_irqsave(&ltq_eiu_lock, flags);
                        ltq_eiu_w32((ltq_eiu_r32(LTQ_EIU_EXIN_C) &
                                    (~(7 << (i * 4)))) | (val << (i * 4)),
                                    LTQ_EIU_EXIN_C);
                        spin_unlock_irqrestore(&ltq_eiu_lock, flags);
                }
        }

        return 0;
}

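/*
 * bring up an EIU irq: unmask it in the ICU, default to low level
 * trigger, clear any stale pending bit and enable the pin in the EIU
 */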
static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
{
        int i;

        ltq_enable_irq(d);
        for (i = 0; i < exin_avail; i++) {
                if (d->hwirq == ltq_eiu_irq[i]) {
                        /* by default we are low level triggered */
                        ltq_eiu_settype(d, IRQF_TRIGGER_LOW);
                        /* clear all pending */
                        ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INC) & ~BIT(i),
                                LTQ_EIU_EXIN_INC);
                        /* enable */
                        ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | BIT(i),
                                LTQ_EIU_EXIN_INEN);
                        break;
                }
        }

        return 0;
}

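/* tear down an EIU irq: mask it in the ICU and disable the pin in the EIU */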
static void ltq_shutdown_eiu_irq(struct irq_data *d)
{
        int i;

        ltq_disable_irq(d);
        for (i = 0; i < exin_avail; i++) {
                if (d->hwirq == ltq_eiu_irq[i]) {
                        /* disable */
                        ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i),
                                LTQ_EIU_EXIN_INEN);
                        break;
                }
        }
}

#if defined(CONFIG_SMP)
static int ltq_icu_irq_set_affinity(struct irq_data *d,
                                    const struct cpumask *cpumask, bool force)
{
        struct cpumask tmask;

        if (!cpumask_and(&tmask, cpumask, cpu_online_mask))
                return -EINVAL;

        irq_data_update_effective_affinity(d, &tmask);

        return IRQ_SET_MASK_OK;
}
#endif

static struct irq_chip ltq_irq_type = {
        .name = "icu",
        .irq_enable = ltq_enable_irq,
        .irq_disable = ltq_disable_irq,
        .irq_unmask = ltq_enable_irq,
        .irq_ack = ltq_ack_irq,
        .irq_mask = ltq_disable_irq,
        .irq_mask_ack = ltq_mask_and_ack_irq,
#if defined(CONFIG_SMP)
        .irq_set_affinity = ltq_icu_irq_set_affinity,
#endif
};

static struct irq_chip ltq_eiu_type = {
        .name = "eiu",
        .irq_startup = ltq_startup_eiu_irq,
        .irq_shutdown = ltq_shutdown_eiu_irq,
        .irq_enable = ltq_enable_irq,
        .irq_disable = ltq_disable_irq,
        .irq_unmask = ltq_enable_irq,
        .irq_ack = ltq_ack_irq,
        .irq_mask = ltq_disable_irq,
        .irq_mask_ack = ltq_mask_and_ack_irq,
        .irq_set_type = ltq_eiu_settype,
#if defined(CONFIG_SMP)
        .irq_set_affinity = ltq_icu_irq_set_affinity,
#endif
};

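/*
 * chained handler for one interrupt module: CPU irq line (module + 2)
 * cascades here; read the module's IOSR on this VPE and dispatch the
 * highest pending bit through the linear irq domain
 */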
static void ltq_hw_irq_handler(struct irq_desc *desc)
{
        unsigned int module = irq_desc_get_irq(desc) - 2;
        u32 irq;
        irq_hw_number_t hwirq;
        int vpe = smp_processor_id();

        irq = ltq_icu_r32(vpe, module, LTQ_ICU_IOSR);
        if (irq == 0)
                return;

        /*
         * a silicon bug means that only the most significant bit set to 1
         * is valid; all other bits might be bogus
         */
        irq = __fls(irq);
        hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
        generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));

        /* if this is an EBU irq, we need to ack it or get a deadlock */
        if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
                ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
                        LTQ_EBU_PCC_ISTAT);
}

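/*
 * domain map hook: hwirqs below MIPS_CPU_IRQ_CASCADE belong to the MIPS
 * CPU irq controller; the rest are ICU irqs, handled by the EIU chip
 * when they double as external irq pins
 */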
static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
        struct irq_chip *chip = &ltq_irq_type;
        struct irq_data *data;
        int i;

        if (hw < MIPS_CPU_IRQ_CASCADE)
                return 0;

        for (i = 0; i < exin_avail; i++)
                if (hw == ltq_eiu_irq[i])
                        chip = &ltq_eiu_type;

        data = irq_get_irq_data(irq);

        irq_data_update_effective_affinity(data, cpumask_of(0));

        irq_set_chip_and_handler(irq, chip, handle_level_irq);

        return 0;
}

static const struct irq_domain_ops irq_domain_ops = {
        .xlate = irq_domain_xlate_onetwocell,
        .map = icu_map,
};

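/*
 * probe the ICU from the devicetree: map one register block per VPE,
 * mask and ack everything, hook up the cascade handlers, register the
 * irq domain and optionally set up the xway external interrupt unit
 */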
int __init icu_of_init(struct device_node *node, struct device_node *parent)
{
        struct device_node *eiu_node;
        struct resource res;
        int i, ret, vpe;

        /* load register regions of available ICUs */
        for_each_possible_cpu(vpe) {
                if (of_address_to_resource(node, vpe, &res))
                        panic("Failed to get icu%i memory range", vpe);

                if (!request_mem_region(res.start, resource_size(&res),
                                        res.name))
                        pr_err("Failed to request icu%i memory\n", vpe);

                ltq_icu_membase[vpe] = ioremap(res.start,
                                        resource_size(&res));

                if (!ltq_icu_membase[vpe])
                        panic("Failed to remap icu%i memory", vpe);
        }

        /* make sure all irqs are turned off by default */
        for_each_possible_cpu(vpe) {
                for (i = 0; i < MAX_IM; i++) {
                        ltq_icu_w32(vpe, i, 0, LTQ_ICU_IER);

                        /* clear all possibly pending interrupts */
                        ltq_icu_w32(vpe, i, ~0, LTQ_ICU_ISR);
                        ltq_icu_w32(vpe, i, ~0, LTQ_ICU_IMR);

                        /* clear resend */
                        ltq_icu_w32(vpe, i, 0, LTQ_ICU_IRSR);
                }
        }

        mips_cpu_irq_init();

        for (i = 0; i < MAX_IM; i++)
                irq_set_chained_handler(i + 2, ltq_hw_irq_handler);

        ltq_domain = irq_domain_add_linear(node,
                (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
                &irq_domain_ops, 0);

        /* tell oprofile which irq to use */
        ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);

        /* the external interrupts are optional and xway only */
        eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
        if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
                /* find out how many external irq sources we have */
                exin_avail = of_property_count_u32_elems(eiu_node,
                                                         "lantiq,eiu-irqs");

                if (exin_avail > MAX_EIU)
                        exin_avail = MAX_EIU;

                ret = of_property_read_u32_array(eiu_node, "lantiq,eiu-irqs",
                                                ltq_eiu_irq, exin_avail);
                if (ret)
                        panic("failed to load external irq resources");

                if (!request_mem_region(res.start, resource_size(&res),
                                                        res.name))
                        pr_err("Failed to request eiu memory\n");

                ltq_eiu_membase = ioremap(res.start,
                                                        resource_size(&res));
                if (!ltq_eiu_membase)
                        panic("Failed to remap eiu memory");
        }

        return 0;
}

int get_c0_perfcount_int(void)
{
        return ltq_perfcount_irq;
}
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);

unsigned int get_c0_compare_int(void)
{
        return CP0_LEGACY_COMPARE_IRQ;
}

static const struct of_device_id of_irq_ids[] __initconst = {
        { .compatible = "lantiq,icu", .data = icu_of_init },
        {},
};

void __init arch_init_irq(void)
{
        of_irq_init(of_irq_ids);
}