/*
 * Synopsys DesignWare APB interrupt controller (dw_apb_ictl) irqchip driver.
 */

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#define APB_INT_ENABLE_L	0x00
#define APB_INT_ENABLE_H	0x04
#define APB_INT_MASK_L		0x08
#define APB_INT_MASK_H		0x0c
#define APB_INT_FINALSTATUS_L	0x30
#define APB_INT_FINALSTATUS_H	0x34
#define APB_INT_BASE_OFFSET	0x04

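/*
 * Chained handler for the parent interrupt: walk every 32-irq bank of the
 * domain, read its FINALSTATUS register and dispatch each pending source.
 */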
static void dw_apb_ictl_handler(struct irq_desc *desc)
{
	struct irq_domain *d = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	int n;

	chained_irq_enter(chip, desc);

	for (n = 0; n < d->revmap_size; n += 32) {
		struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, n);
		u32 stat = readl_relaxed(gc->reg_base + APB_INT_FINALSTATUS_L);

		while (stat) {
			u32 hwirq = ffs(stat) - 1;
			u32 virq = irq_find_mapping(d, gc->irq_base + hwirq);

			generic_handle_irq(virq);
			stat &= ~BIT(hwirq);
		}
	}

	chained_irq_exit(chip, desc);
}

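/*
 * On resume, re-enable all sources and rewrite the mask register from the
 * generic chip's mask cache, restoring the pre-suspend mask state.
 */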
#ifdef CONFIG_PM
static void dw_apb_ictl_resume(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);

	irq_gc_lock(gc);
	writel_relaxed(~0, gc->reg_base + ct->regs.enable);
	writel_relaxed(*ct->mask_cache, gc->reg_base + ct->regs.mask);
	irq_gc_unlock(gc);
}
#else
#define dw_apb_ictl_resume	NULL
#endif

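/*
 * Probe one "snps,dw-apb-ictl" node: map its registers, size the linear irq
 * domain by probing the enable registers, and install the chained handler on
 * the parent interrupt.
 */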
static int __init dw_apb_ictl_init(struct device_node *np,
				   struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	struct resource r;
	struct irq_domain *domain;
	struct irq_chip_generic *gc;
	void __iomem *iobase;
	int ret, nrirqs, irq, i;
	u32 reg;

	irq = irq_of_parse_and_map(np, 0);
	if (irq <= 0) {
		pr_err("%pOF: unable to parse irq\n", np);
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &r);
	if (ret) {
		pr_err("%pOF: unable to get resource\n", np);
		return ret;
	}

	if (!request_mem_region(r.start, resource_size(&r), np->full_name)) {
		pr_err("%pOF: unable to request mem region\n", np);
		return -ENOMEM;
	}

	iobase = ioremap(r.start, resource_size(&r));
	if (!iobase) {
		pr_err("%pOF: unable to map resource\n", np);
		ret = -ENOMEM;
		goto err_release;
	}

	/*
	 * The DesignWare IP can be synthesized with 2-64 irq lines. Probe the
	 * actual count by writing all-ones to the enable registers and reading
	 * back: bits whose flip-flops were not synthesized stay zero. All
	 * sources are left enabled but masked afterwards.
	 */
	writel_relaxed(~0, iobase + APB_INT_MASK_L);
	writel_relaxed(~0, iobase + APB_INT_MASK_H);
	writel_relaxed(~0, iobase + APB_INT_ENABLE_L);
	writel_relaxed(~0, iobase + APB_INT_ENABLE_H);

	reg = readl_relaxed(iobase + APB_INT_ENABLE_H);
	if (reg)
		nrirqs = 32 + fls(reg);
	else
		nrirqs = fls(readl_relaxed(iobase + APB_INT_ENABLE_L));

	domain = irq_domain_add_linear(np, nrirqs, &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("%pOF: unable to add irq domain\n", np);
		ret = -ENOMEM;
		goto err_unmap;
	}

	ret = irq_alloc_domain_generic_chips(domain, 32, 1, np->name,
					     handle_level_irq, clr, 0,
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("%pOF: unable to alloc irq domain gc\n", np);
		goto err_domain;
	}

	/*
	 * Each 32-irq bank gets one generic chip. The *_H registers sit
	 * APB_INT_BASE_OFFSET (0x04) above their *_L counterparts, so bank i
	 * simply shifts reg_base and keeps using the *_L offsets.
	 */
	for (i = 0; i < DIV_ROUND_UP(nrirqs, 32); i++) {
		gc = irq_get_domain_generic_chip(domain, i * 32);
		gc->reg_base = iobase + i * APB_INT_BASE_OFFSET;
		gc->chip_types[0].regs.mask = APB_INT_MASK_L;
		gc->chip_types[0].regs.enable = APB_INT_ENABLE_L;
		gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
		gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
		gc->chip_types[0].chip.irq_resume = dw_apb_ictl_resume;
	}

	irq_set_chained_handler_and_data(irq, dw_apb_ictl_handler, domain);

	return 0;

err_domain:
	irq_domain_remove(domain);
err_unmap:
	iounmap(iobase);
err_release:
	release_mem_region(r.start, resource_size(&r));
	return ret;
}
IRQCHIP_DECLARE(dw_apb_ictl, "snps,dw-apb-ictl", dw_apb_ictl_init);