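/*
 * Support for the interrupt crossbar found on some TI SoCs (such as DRA7):
 * a mux that routes a large set of peripheral interrupt events onto the
 * limited number of interrupt lines wired to the GIC. Implemented as a
 * hierarchical irqdomain stacked on top of the GIC domain.
 */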
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/slab.h>

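/*
 * Values stored in irq_map[] for GIC lines that do not currently carry a
 * crossbar event: free for allocation, reserved (not wired through the
 * crossbar) or skipped (must not be touched by this driver).
 */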
#define IRQ_FREE	-1
#define IRQ_RESERVED	-2
#define IRQ_SKIP	-3
#define GIC_IRQ_START	32

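/**
 * struct crossbar_device - crossbar device description
 * @lock: spinlock serializing access to @irq_map
 * @int_max: maximum number of GIC interrupt lines handled by the crossbar
 * @safe_map: safe default value to initialize the crossbar
 * @max_crossbar_sources: maximum number of crossbar input events
 * @irq_map: array mapping GIC interrupt lines to crossbar events
 * @crossbar_base: crossbar base address
 * @register_offsets: register offset for each GIC interrupt line
 * @write: register write function pointer
 */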
struct crossbar_device {
	raw_spinlock_t lock;
	uint int_max;
	uint safe_map;
	uint max_crossbar_sources;
	uint *irq_map;
	void __iomem *crossbar_base;
	int *register_offsets;
	void (*write)(int, int);
};

/* Only one crossbar instance is expected per SoC */
static struct crossbar_device *cb;

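/*
 * Helpers programming one crossbar mux register. The register width
 * (byte, word or long) is SoC specific and is selected at init time
 * from the "ti,reg-size" property.
 */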
static void crossbar_writel(int irq_no, int cb_no)
{
	writel(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
}

static void crossbar_writew(int irq_no, int cb_no)
{
	writew(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
}

static void crossbar_writeb(int irq_no, int cb_no)
{
	writeb(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
}

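/*
 * The crossbar is a plain router with no mask/ack logic of its own, so
 * every irq_chip operation is forwarded to the parent (GIC) domain.
 */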
static struct irq_chip crossbar_chip = {
	.name = "CBAR",
	.irq_eoi = irq_chip_eoi_parent,
	.irq_mask = irq_chip_mask_parent,
	.irq_unmask = irq_chip_unmask_parent,
	.irq_retrigger = irq_chip_retrigger_hierarchy,
	.irq_set_type = irq_chip_set_type_parent,
	.flags = IRQCHIP_MASK_ON_SUSPEND |
		 IRQCHIP_SKIP_SET_WAKE,
#ifdef CONFIG_SMP
	.irq_set_affinity = irq_chip_set_affinity_parent,
#endif
};

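/*
 * Find a free GIC interrupt line, starting from the highest one, route
 * the requested crossbar event to it and allocate the line in the
 * parent (GIC) domain.
 */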
static int allocate_gic_irq(struct irq_domain *domain, unsigned virq,
			    irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;
	int i;
	int err;

	if (!irq_domain_get_of_node(domain->parent))
		return -EINVAL;

	raw_spin_lock(&cb->lock);
	for (i = cb->int_max - 1; i >= 0; i--) {
		if (cb->irq_map[i] == IRQ_FREE) {
			cb->irq_map[i] = hwirq;
			break;
		}
	}
	raw_spin_unlock(&cb->lock);

	if (i < 0)
		return -ENODEV;

	fwspec.fwnode = domain->parent->fwnode;
	fwspec.param_count = 3;
	fwspec.param[0] = 0;	/* SPI */
	fwspec.param[1] = i;
	fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;

	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (err)
		cb->irq_map[i] = IRQ_FREE;	/* parent allocation failed, release the line */
	else
		cb->write(i, hwirq);		/* route the crossbar event to the GIC line */

	return err;
}

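/*
 * The firmware spec follows the GIC three-cell layout: cell 0 must be 0
 * (SPI), cell 1 is the crossbar event number and cell 2 the trigger
 * type. One free GIC line is allocated per requested event.
 */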
static int crossbar_domain_alloc(struct irq_domain *d, unsigned int virq,
				 unsigned int nr_irqs, void *data)
{
	struct irq_fwspec *fwspec = data;
	irq_hw_number_t hwirq;
	int i;

	if (fwspec->param_count != 3)
		return -EINVAL;	/* Not GIC compliant */
	if (fwspec->param[0] != 0)
		return -EINVAL;	/* No PPI should point to this domain */

	hwirq = fwspec->param[1];
	if ((hwirq + nr_irqs) > cb->max_crossbar_sources)
		return -EINVAL;	/* Can't deal with this */

	for (i = 0; i < nr_irqs; i++) {
		int err = allocate_gic_irq(d, virq + i, hwirq + i);

		if (err)
			return err;

		irq_domain_set_hwirq_and_chip(d, virq + i, hwirq + i,
					      &crossbar_chip, NULL);
	}

	return 0;
}

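/**
 * crossbar_domain_free - unmap/free a crossbar<->irq connection
 * @domain: domain of irq to unmap
 * @virq: virq number
 * @nr_irqs: number of irqs to free
 *
 * Freed lines are routed back to the safe event and marked IRQ_FREE so
 * they can be reused by a later allocation.
 */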
static void crossbar_domain_free(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs)
{
	int i;

	raw_spin_lock(&cb->lock);
	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		/* Release the line before irq_domain_reset_irq_data() clears d->hwirq */
		cb->irq_map[d->hwirq] = IRQ_FREE;
		cb->write(d->hwirq, cb->safe_map);
		irq_domain_reset_irq_data(d);
	}
	raw_spin_unlock(&cb->lock);
}

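/*
 * Translate a DT interrupt specifier into (crossbar event, trigger type).
 * Only three-cell, SPI-style specifiers are accepted.
 */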
static int crossbar_domain_translate(struct irq_domain *d,
				     struct irq_fwspec *fwspec,
				     unsigned long *hwirq,
				     unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count != 3)
			return -EINVAL;

		/* No PPI should point to this domain */
		if (fwspec->param[0] != 0)
			return -EINVAL;

		*hwirq = fwspec->param[1];
		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
		return 0;
	}

	return -EINVAL;
}

static const struct irq_domain_ops crossbar_domain_ops = {
	.alloc = crossbar_domain_alloc,
	.free = crossbar_domain_free,
	.translate = crossbar_domain_translate,
};

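/*
 * Parse the crossbar DT node: map the registers, build the irq_map with
 * the reserved/skipped lines marked, pick the register accessor matching
 * "ti,reg-size" and route every controllable line to the safe event.
 */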
static int __init crossbar_of_init(struct device_node *node)
{
	int i, size, max = 0, reserved = 0, entry;
	const __be32 *irqsr;
	int ret = -ENOMEM;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return ret;

	cb->crossbar_base = of_iomap(node, 0);
	if (!cb->crossbar_base)
		goto err_cb;

	of_property_read_u32(node, "ti,max-crossbar-sources",
			     &cb->max_crossbar_sources);
	if (!cb->max_crossbar_sources) {
		pr_err("missing 'ti,max-crossbar-sources' property\n");
		ret = -EINVAL;
		goto err_base;
	}

	of_property_read_u32(node, "ti,max-irqs", &max);
	if (!max) {
		pr_err("missing 'ti,max-irqs' property\n");
		ret = -EINVAL;
		goto err_base;
	}
	cb->irq_map = kcalloc(max, sizeof(int), GFP_KERNEL);
	if (!cb->irq_map)
		goto err_base;

	cb->int_max = max;

	/* Start with every GIC line unallocated */
	for (i = 0; i < max; i++)
		cb->irq_map[i] = IRQ_FREE;

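	/*
	 * Lines listed in "ti,irqs-reserved" are not routed through the
	 * crossbar; mark them so they are never allocated or programmed.
	 */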
	irqsr = of_get_property(node, "ti,irqs-reserved", &size);
	if (irqsr) {
		size /= sizeof(__be32);

		for (i = 0; i < size; i++) {
			of_property_read_u32_index(node,
						   "ti,irqs-reserved",
						   i, &entry);
			if (entry >= max) {
				pr_err("Invalid reserved entry\n");
				ret = -EINVAL;
				goto err_irq_map;
			}
			cb->irq_map[entry] = IRQ_RESERVED;
		}
	}

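	/*
	 * Lines listed in "ti,irqs-skip" go through the crossbar but must
	 * not be remapped by this driver (for instance lines managed
	 * elsewhere); keep them out of the allocation pool.
	 */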
	irqsr = of_get_property(node, "ti,irqs-skip", &size);
	if (irqsr) {
		size /= sizeof(__be32);

		for (i = 0; i < size; i++) {
			of_property_read_u32_index(node,
						   "ti,irqs-skip",
						   i, &entry);
			if (entry >= max) {
				pr_err("Invalid skip entry\n");
				ret = -EINVAL;
				goto err_irq_map;
			}
			cb->irq_map[entry] = IRQ_SKIP;
		}
	}

	cb->register_offsets = kcalloc(max, sizeof(int), GFP_KERNEL);
	if (!cb->register_offsets)
		goto err_irq_map;

	/* The mux register width is SoC specific: pick the matching accessor */
	of_property_read_u32(node, "ti,reg-size", &size);

	switch (size) {
	case 1:
		cb->write = crossbar_writeb;
		break;
	case 2:
		cb->write = crossbar_writew;
		break;
	case 4:
		cb->write = crossbar_writel;
		break;
	default:
		pr_err("Invalid reg-size property\n");
		ret = -EINVAL;
		goto err_reg_offset;
	}

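	/*
	 * Register offsets are not linear because the reserved lines have
	 * no mux register at all: compute and store each line's offset
	 * once here.
	 */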
	for (i = 0; i < max; i++) {
		if (cb->irq_map[i] == IRQ_RESERVED)
			continue;

		cb->register_offsets[i] = reserved;
		reserved += size;
	}

308
309 of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map);
310
311 for (i = 0; i < max; i++) {
312 if (cb->irq_map[i] == IRQ_RESERVED ||
313 cb->irq_map[i] == IRQ_SKIP)
314 continue;
315
316 cb->write(i, cb->safe_map);
317 }
318
319 raw_spin_lock_init(&cb->lock);
320
321 return 0;

err_reg_offset:
	kfree(cb->register_offsets);
err_irq_map:
	kfree(cb->irq_map);
err_base:
	iounmap(cb->crossbar_base);
err_cb:
	kfree(cb);

	cb = NULL;
	return ret;
}

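/*
 * Entry point, invoked through IRQCHIP_DECLARE(): the crossbar domain is
 * stacked as a hierarchical child of the parent GIC domain.
 */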
static int __init irqcrossbar_init(struct device_node *node,
				   struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	int err;

	if (!parent) {
		pr_err("%s: no parent, giving up\n", node->full_name);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%s: unable to obtain parent domain\n", node->full_name);
		return -ENXIO;
	}

	err = crossbar_of_init(node);
	if (err)
		return err;

	domain = irq_domain_add_hierarchy(parent_domain, 0,
					  cb->max_crossbar_sources,
					  node, &crossbar_domain_ops,
					  NULL);
	if (!domain) {
		pr_err("%s: failed to allocate domain\n", node->full_name);
		return -ENOMEM;
	}

	return 0;
}

IRQCHIP_DECLARE(ti_irqcrossbar, "ti,irq-crossbar", irqcrossbar_init);