// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas IRQC Driver
 *
 * Copyright (C) 2013 Magnus Damm
 */
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>

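/* Each driver instance handles at most 32 external interrupt pins */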
#define IRQC_IRQ_MAX 32

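/*
 * Register offsets, relative to the instance base: interrupt request and
 * enable status/set registers, a per-CPU register bank selected by
 * IRQC_INT_CPU_BASE(n), detection status registers from 0x100 upwards,
 * and one configuration register per interrupt at IRQC_CONFIG(n).
 */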
#define IRQC_REQ_STS 0x00
#define IRQC_EN_STS 0x04
#define IRQC_EN_SET 0x08
#define IRQC_INT_CPU_BASE(n) (0x000 + ((n) * 0x10))

#define DETECT_STATUS 0x100
#define MONITOR 0x104
#define HLVL_STS 0x108
#define LLVL_STS 0x10c
#define S_R_EDGE_STS 0x110
#define S_F_EDGE_STS 0x114
#define A_R_EDGE_STS 0x118
#define A_F_EDGE_STS 0x11c
#define CHTEN_STS 0x120
#define IRQC_CONFIG(n) (0x180 + ((n) * 0x04))

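/*
 * State for one external interrupt pin: its hardware position within the
 * block and the parent interrupt that was requested on its behalf.
 */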
struct irqc_irq {
	int hw_irq;
	int requested_irq;
	struct irqc_priv *p;
};

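/* Per-instance driver data, shared by all interrupts of one IRQC block */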
struct irqc_priv {
	void __iomem *iomem;
	void __iomem *cpu_int_base;
	struct irqc_irq irq[IRQC_IRQ_MAX];
	unsigned int number_of_irqs;
	struct platform_device *pdev;
	struct irq_chip_generic *gc;
	struct irq_domain *irq_domain;
	atomic_t wakeup_path;
};

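/* The irq domain carries the driver instance as host_data */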
static struct irqc_priv *irq_data_to_priv(struct irq_data *data)
{
	return data->domain->host_data;
}

static void irqc_dbg(struct irqc_irq *i, char *str)
{
	dev_dbg(&i->p->pdev->dev, "%s (%d:%d)\n",
		str, i->requested_irq, i->hw_irq);
}

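/*
 * Map Linux trigger types to the sense selection value programmed into the
 * lower bits of IRQCn CONFIG; zero entries mark unsupported trigger types.
 */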
static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = {
	[IRQ_TYPE_LEVEL_LOW] = 0x01,
	[IRQ_TYPE_LEVEL_HIGH] = 0x02,
	[IRQ_TYPE_EDGE_FALLING] = 0x04,
	[IRQ_TYPE_EDGE_RISING] = 0x08,
	[IRQ_TYPE_EDGE_BOTH] = 0x0c,
};

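/* Read-modify-write the per-IRQ CONFIG register to select the new sense */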
static int irqc_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct irqc_priv *p = irq_data_to_priv(d);
	int hw_irq = irqd_to_hwirq(d);
	unsigned char value = irqc_sense[type & IRQ_TYPE_SENSE_MASK];
	u32 tmp;

	irqc_dbg(&p->irq[hw_irq], "sense");

	if (!value)
		return -EINVAL;

	tmp = ioread32(p->iomem + IRQC_CONFIG(hw_irq));
	tmp &= ~0x3f;
	tmp |= value;
	iowrite32(tmp, p->iomem + IRQC_CONFIG(hw_irq));
	return 0;
}

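/*
 * Forward wake-up configuration to the parent interrupt and keep count of
 * armed wake-up sources so irqc_suspend() can flag the wakeup path.
 */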
static int irqc_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct irqc_priv *p = irq_data_to_priv(d);
	int hw_irq = irqd_to_hwirq(d);

	irq_set_irq_wake(p->irq[hw_irq].requested_irq, on);
	if (on)
		atomic_inc(&p->wakeup_path);
	else
		atomic_dec(&p->wakeup_path);

	return 0;
}

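/*
 * Each external interrupt is requested individually from the parent
 * controller; acknowledge the detect status bit and forward the event to
 * the virtual interrupt mapped in the local domain.
 */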
static irqreturn_t irqc_irq_handler(int irq, void *dev_id)
{
	struct irqc_irq *i = dev_id;
	struct irqc_priv *p = i->p;
	u32 bit = BIT(i->hw_irq);

	irqc_dbg(i, "demux1");

	if (ioread32(p->iomem + DETECT_STATUS) & bit) {
		iowrite32(bit, p->iomem + DETECT_STATUS);
		irqc_dbg(i, "demux2");
		generic_handle_irq(irq_find_mapping(p->irq_domain, i->hw_irq));
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

static int irqc_probe(struct platform_device *pdev)
{
	struct irqc_priv *p;
	struct resource *io;
	struct resource *irq;
	const char *name = dev_name(&pdev->dev);
	int ret;
	int k;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		ret = -ENOMEM;
		goto err0;
	}

	p->pdev = pdev;
	platform_set_drvdata(pdev, p);

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

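	/* get hold of the mandatory IOMEM resource */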
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io) {
		dev_err(&pdev->dev, "not enough IOMEM resources\n");
		ret = -EINVAL;
		goto err1;
	}

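	/* accept any number of IRQ resources between 1 and IRQC_IRQ_MAX */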
	for (k = 0; k < IRQC_IRQ_MAX; k++) {
		irq = platform_get_resource(pdev, IORESOURCE_IRQ, k);
		if (!irq)
			break;

		p->irq[k].p = p;
		p->irq[k].hw_irq = k;
		p->irq[k].requested_irq = irq->start;
	}

	p->number_of_irqs = k;
	if (p->number_of_irqs < 1) {
		dev_err(&pdev->dev, "not enough IRQ resources\n");
		ret = -EINVAL;
		goto err1;
	}

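	/* map the register block of this IRQC instance */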
	p->iomem = ioremap_nocache(io->start, resource_size(io));
	if (!p->iomem) {
		dev_err(&pdev->dev, "failed to remap IOMEM\n");
		ret = -ENXIO;
		goto err1;
	}

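	/* use the interrupt enable registers of CPU bank 0 for mask/unmask */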
	p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0);

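	/* create a linear domain with one hwirq per detected interrupt */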
	p->irq_domain = irq_domain_add_linear(pdev->dev.of_node,
					      p->number_of_irqs,
					      &irq_generic_chip_ops, p);
	if (!p->irq_domain) {
		ret = -ENXIO;
		dev_err(&pdev->dev, "cannot initialize irq domain\n");
		goto err2;
	}

	ret = irq_alloc_domain_generic_chips(p->irq_domain, p->number_of_irqs,
					     1, name, handle_level_irq,
					     0, 0, IRQ_GC_INIT_NESTED_LOCK);
	if (ret) {
		dev_err(&pdev->dev, "cannot allocate generic chip\n");
		goto err3;
	}

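	/* hook the generic chip up to the enable set/status registers */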
	p->gc = irq_get_domain_generic_chip(p->irq_domain, 0);
	p->gc->reg_base = p->cpu_int_base;
	p->gc->chip_types[0].regs.enable = IRQC_EN_SET;
	p->gc->chip_types[0].regs.disable = IRQC_EN_STS;
	p->gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
	p->gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
	p->gc->chip_types[0].chip.irq_set_type = irqc_irq_set_type;
	p->gc->chip_types[0].chip.irq_set_wake = irqc_irq_set_wake;
	p->gc->chip_types[0].chip.flags = IRQCHIP_MASK_ON_SUSPEND;

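	/* request the parent interrupts one by one */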
	for (k = 0; k < p->number_of_irqs; k++) {
		if (request_irq(p->irq[k].requested_irq, irqc_irq_handler,
				0, name, &p->irq[k])) {
			dev_err(&pdev->dev, "failed to request IRQ\n");
			ret = -ENOENT;
			goto err4;
		}
	}

	dev_info(&pdev->dev, "driving %d irqs\n", p->number_of_irqs);

	return 0;
err4:
	while (--k >= 0)
		free_irq(p->irq[k].requested_irq, &p->irq[k]);

err3:
	irq_domain_remove(p->irq_domain);
err2:
	iounmap(p->iomem);
err1:
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	kfree(p);
err0:
	return ret;
}

static int irqc_remove(struct platform_device *pdev)
{
	struct irqc_priv *p = platform_get_drvdata(pdev);
	int k;

	for (k = 0; k < p->number_of_irqs; k++)
		free_irq(p->irq[k].requested_irq, &p->irq[k]);

	irq_domain_remove(p->irq_domain);
	iounmap(p->iomem);
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	kfree(p);
	return 0;
}

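/*
 * Tell the PM core that this device is part of the wakeup path whenever
 * at least one of its interrupts is armed as a wake-up source.
 */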
static int __maybe_unused irqc_suspend(struct device *dev)
{
	struct irqc_priv *p = dev_get_drvdata(dev);

	if (atomic_read(&p->wakeup_path))
		device_set_wakeup_path(dev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(irqc_pm_ops, irqc_suspend, NULL);

static const struct of_device_id irqc_dt_ids[] = {
	{ .compatible = "renesas,irqc", },
	{},
};
MODULE_DEVICE_TABLE(of, irqc_dt_ids);

static struct platform_driver irqc_device_driver = {
	.probe = irqc_probe,
	.remove = irqc_remove,
	.driver = {
		.name = "renesas_irqc",
		.of_match_table = irqc_dt_ids,
		.pm = &irqc_pm_ops,
	},
};

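/*
 * Register early (postcore_initcall) so the interrupt controller is
 * available before ordinary device drivers probe.
 */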
static int __init irqc_init(void)
{
	return platform_driver_register(&irqc_device_driver);
}
postcore_initcall(irqc_init);

static void __exit irqc_exit(void)
{
	platform_driver_unregister(&irqc_device_driver);
}
module_exit(irqc_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Renesas IRQC Driver");
MODULE_LICENSE("GPL v2");