linux/drivers/irqchip/irq-orion.c
/*
 * Marvell Orion SoCs IRQ chip driver.
 *
 * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/exception.h>
#include <asm/mach/irq.h>

/*
 * Orion SoC main interrupt controller
 */
#define ORION_IRQS_PER_CHIP             32

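/*
 * Per-bank register offsets. Each bank of 32 interrupts exposes this
 * block; only CAUSE and MASK are touched by this driver.
 */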
#define ORION_IRQ_CAUSE                 0x00
#define ORION_IRQ_MASK                  0x04
#define ORION_IRQ_FIQ_MASK              0x08
#define ORION_IRQ_ENDP_MASK             0x0c

static struct irq_domain *orion_irq_domain;

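/*
 * Flattened IRQ entry point: walk every 32-interrupt bank, read its
 * CAUSE register masked by the cached copy of MASK, and dispatch each
 * pending source through the domain, highest set bit first.
 */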
static void
__exception_irq_entry orion_handle_irq(struct pt_regs *regs)
{
        struct irq_domain_chip_generic *dgc = orion_irq_domain->gc;
        int n, base = 0;

        for (n = 0; n < dgc->num_chips; n++, base += ORION_IRQS_PER_CHIP) {
                struct irq_chip_generic *gc =
                        irq_get_domain_generic_chip(orion_irq_domain, base);
                u32 stat = readl_relaxed(gc->reg_base + ORION_IRQ_CAUSE) &
                        gc->mask_cache;
                while (stat) {
                        u32 hwirq = __fls(stat);
                        handle_domain_irq(orion_irq_domain,
                                          gc->irq_base + hwirq, regs);
                        stat &= ~(1 << hwirq);
                }
        }
}

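/*
 * Set up the main controller: every valid "reg" window becomes one
 * generic chip of 32 level-type interrupts inside a single linear
 * domain. All sources start masked, and the handler above is installed
 * as the CPU's IRQ entry point.
 */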
static int __init orion_irq_init(struct device_node *np,
                                 struct device_node *parent)
{
        unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
        int n, ret, base, num_chips = 0;
        struct resource r;

        /* count number of irq chips by valid reg addresses */
        while (of_address_to_resource(np, num_chips, &r) == 0)
                num_chips++;

        orion_irq_domain = irq_domain_add_linear(np,
                                num_chips * ORION_IRQS_PER_CHIP,
                                &irq_generic_chip_ops, NULL);
        if (!orion_irq_domain)
                panic("%pOFn: unable to add irq domain\n", np);

        ret = irq_alloc_domain_generic_chips(orion_irq_domain,
                                ORION_IRQS_PER_CHIP, 1, np->full_name,
                                handle_level_irq, clr, 0,
                                IRQ_GC_INIT_MASK_CACHE);
        if (ret)
                panic("%pOFn: unable to alloc irq domain gc\n", np);

        for (n = 0, base = 0; n < num_chips; n++, base += ORION_IRQS_PER_CHIP) {
                struct irq_chip_generic *gc =
                        irq_get_domain_generic_chip(orion_irq_domain, base);

                of_address_to_resource(np, n, &r);

                if (!request_mem_region(r.start, resource_size(&r), np->name))
                        panic("%pOFn: unable to request mem region %d",
                              np, n);

                gc->reg_base = ioremap(r.start, resource_size(&r));
                if (!gc->reg_base)
                        panic("%pOFn: unable to map resource %d", np, n);

                gc->chip_types[0].regs.mask = ORION_IRQ_MASK;
                gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
                gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

                /* mask all interrupts */
                writel(0, gc->reg_base + ORION_IRQ_MASK);
        }

        set_handle_irq(orion_handle_irq);
        return 0;
}
IRQCHIP_DECLARE(orion_intc, "marvell,orion-intc", orion_irq_init);
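/*
 * Illustrative device tree node for the main controller. The compatible
 * string matches the IRQCHIP_DECLARE() above; the register addresses,
 * sizes and cell count below are placeholders, not taken from a real
 * board file:
 *
 *      intc: interrupt-controller {
 *              compatible = "marvell,orion-intc";
 *              interrupt-controller;
 *              #interrupt-cells = <1>;
 *              reg = <0x20200 0x04>, <0x20210 0x04>;
 *      };
 *
 * Two "reg" entries yield two banks, i.e. 64 interrupts in the domain.
 */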

/*
 * Orion SoC bridge interrupt controller
 */
#define ORION_BRIDGE_IRQ_CAUSE  0x00
#define ORION_BRIDGE_IRQ_MASK   0x04

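/*
 * Chained handler for the bridge controller: read CAUSE masked by the
 * cached copy of MASK and forward each pending bit to its mapping in
 * the bridge domain, highest set bit first.
 */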
static void orion_bridge_irq_handler(struct irq_desc *desc)
{
        struct irq_domain *d = irq_desc_get_handler_data(desc);

        struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0);
        u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) &
                   gc->mask_cache;

        while (stat) {
                u32 hwirq = __fls(stat);

                generic_handle_irq(irq_find_mapping(d, gc->irq_base + hwirq));
                stat &= ~(1 << hwirq);
        }
}

/*
 * Bridge IRQ_CAUSE is asserted regardless of the IRQ_MASK register.
 * To avoid interrupt events on stale irqs, we clear them before unmask.
 */
static unsigned int orion_bridge_irq_startup(struct irq_data *d)
{
        struct irq_chip_type *ct = irq_data_get_chip_type(d);

        ct->chip.irq_ack(d);
        ct->chip.irq_unmask(d);
        return 0;
}

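/*
 * Set up the bridge controller: one generic chip of edge-type
 * interrupts (32 by default, overridable through the optional
 * "marvell,#interrupts" property), chained onto the parent interrupt.
 * All sources are masked and their causes cleared before the chained
 * handler is installed.
 */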
static int __init orion_bridge_irq_init(struct device_node *np,
                                        struct device_node *parent)
{
        unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
        struct resource r;
        struct irq_domain *domain;
        struct irq_chip_generic *gc;
        int ret, irq, nrirqs = 32;

        /* get optional number of interrupts provided */
        of_property_read_u32(np, "marvell,#interrupts", &nrirqs);

        domain = irq_domain_add_linear(np, nrirqs,
                                       &irq_generic_chip_ops, NULL);
        if (!domain) {
                pr_err("%pOFn: unable to add irq domain\n", np);
                return -ENOMEM;
        }

        ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name,
                             handle_edge_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
        if (ret) {
                pr_err("%pOFn: unable to alloc irq domain gc\n", np);
                return ret;
        }

        ret = of_address_to_resource(np, 0, &r);
        if (ret) {
                pr_err("%pOFn: unable to get resource\n", np);
                return ret;
        }

        if (!request_mem_region(r.start, resource_size(&r), np->name)) {
                pr_err("%s: unable to request mem region\n", np->name);
                return -ENOMEM;
        }

        /* Map the parent interrupt for the chained handler */
        irq = irq_of_parse_and_map(np, 0);
        if (irq <= 0) {
                pr_err("%pOFn: unable to parse irq\n", np);
                return -EINVAL;
        }

        gc = irq_get_domain_generic_chip(domain, 0);
        gc->reg_base = ioremap(r.start, resource_size(&r));
        if (!gc->reg_base) {
                pr_err("%pOFn: unable to map resource\n", np);
                return -ENOMEM;
        }

        gc->chip_types[0].regs.ack = ORION_BRIDGE_IRQ_CAUSE;
        gc->chip_types[0].regs.mask = ORION_BRIDGE_IRQ_MASK;
        gc->chip_types[0].chip.irq_startup = orion_bridge_irq_startup;
        gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit;
        gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
        gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

        /* mask and clear all interrupts */
        writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK);
        writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE);

        irq_set_chained_handler_and_data(irq, orion_bridge_irq_handler,
                                         domain);

        return 0;
}
IRQCHIP_DECLARE(orion_bridge_intc,
                "marvell,orion-bridge-intc", orion_bridge_irq_init);
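/*
 * Illustrative device tree node for the bridge controller. The
 * compatible string matches the IRQCHIP_DECLARE() above; the register
 * window, parent interrupt and interrupt count are placeholders:
 *
 *      bridge_intc: interrupt-controller {
 *              compatible = "marvell,orion-bridge-intc";
 *              interrupt-controller;
 *              #interrupt-cells = <1>;
 *              reg = <0x20110 0x8>;
 *              interrupts = <0>;
 *              marvell,#interrupts = <6>;
 *      };
 */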