/*
 * Library implementing the most common irq chip callback functions
 */
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/syscore_ops.h>

#include "internals.h"

static LIST_HEAD(gc_list);
static DEFINE_RAW_SPINLOCK(gc_lock);

/**
 * irq_gc_noop - NOOP function
 * @d: irq_data
 */
void irq_gc_noop(struct irq_data *d)
{
}

/**
 * irq_gc_mask_disable_reg - Mask chip via disable register
 * @d: irq_data
 *
 * Chip has separate enable/disable registers instead of a single mask
 * register.
 */
void irq_gc_mask_disable_reg(struct irq_data *d)
{
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        struct irq_chip_type *ct = irq_data_get_chip_type(d);
        u32 mask = d->mask;

        irq_gc_lock(gc);
        irq_reg_writel(gc, mask, ct->regs.disable);
        *ct->mask_cache &= ~mask;
        irq_gc_unlock(gc);
}

/**
 * irq_gc_mask_set_bit - Mask chip via setting bit in mask register
 * @d: irq_data
 *
 * Chip has a single mask register. Values of this register are cached
 * and protected by gc->lock.
 */
void irq_gc_mask_set_bit(struct irq_data *d)
{
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        struct irq_chip_type *ct = irq_data_get_chip_type(d);
        u32 mask = d->mask;

        irq_gc_lock(gc);
        *ct->mask_cache |= mask;
        irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
        irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_set_bit);

/**
 * irq_gc_mask_clr_bit - Mask chip via clearing bit in mask register
 * @d: irq_data
 *
 * Chip has a single mask register. Values of this register are cached
 * and protected by gc->lock.
 */
void irq_gc_mask_clr_bit(struct irq_data *d)
{
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        struct irq_chip_type *ct = irq_data_get_chip_type(d);
        u32 mask = d->mask;

        irq_gc_lock(gc);
        *ct->mask_cache &= ~mask;
        irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
        irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_clr_bit);

/**
 * irq_gc_unmask_enable_reg - Unmask chip via enable register
 * @d: irq_data
 *
 * Chip has separate enable/disable registers instead of a single mask
 * register.
 */
void irq_gc_unmask_enable_reg(struct irq_data *d)
{
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        struct irq_chip_type *ct = irq_data_get_chip_type(d);
        u32 mask = d->mask;

        irq_gc_lock(gc);
        irq_reg_writel(gc, mask, ct->regs.enable);
        *ct->mask_cache |= mask;
        irq_gc_unlock(gc);
}

/**
 * irq_gc_ack_set_bit - Ack pending interrupt via setting bit
 * @d: irq_data
 */
void irq_gc_ack_set_bit(struct irq_data *d)
{
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        struct irq_chip_type *ct = irq_data_get_chip_type(d);
        u32 mask = d->mask;

        irq_gc_lock(gc);
        irq_reg_writel(gc, mask, ct->regs.ack);
        irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_ack_set_bit);

/**
 * irq_gc_ack_clr_bit - Ack pending interrupt via clearing bit
 * @d: irq_data
 */
void irq_gc_ack_clr_bit(struct irq_data *d)
{
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        struct irq_chip_type *ct = irq_data_get_chip_type(d);
        u32 mask = ~d->mask;

        irq_gc_lock(gc);
        irq_reg_writel(gc, mask, ct->regs.ack);
        irq_gc_unlock(gc);
}

/**
 * irq_gc_mask_disable_reg_and_ack - Mask and ack pending interrupt
 * @d: irq_data
 *
 * Chip has separate enable/disable registers and acknowledges a pending
 * interrupt by setting a bit.
 */
void irq_gc_mask_disable_reg_and_ack(struct irq_data *d)
{
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        struct irq_chip_type *ct = irq_data_get_chip_type(d);
        u32 mask = d->mask;

        irq_gc_lock(gc);
        /* Mask via the disable register and keep the mask cache in sync */
        irq_reg_writel(gc, mask, ct->regs.disable);
        *ct->mask_cache &= ~mask;
        irq_reg_writel(gc, mask, ct->regs.ack);
        irq_gc_unlock(gc);
}

/**
 * irq_gc_eoi - EOI interrupt
 * @d: irq_data
 */
void irq_gc_eoi(struct irq_data *d)
{
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        struct irq_chip_type *ct = irq_data_get_chip_type(d);
        u32 mask = d->mask;

        irq_gc_lock(gc);
        irq_reg_writel(gc, mask, ct->regs.eoi);
        irq_gc_unlock(gc);
}

/**
 * irq_gc_set_wake - Set/clr wake bit for an interrupt
 * @d: irq_data
 * @on: Indicates whether the wake bit should be set or cleared
 *
 * For chips where the wake from suspend functionality is not
 * configured in a separate register and the wakeup active state is
 * just stored in a bitmask. Bits which are not set in
 * gc->wake_enabled are rejected with -EINVAL.
 */
int irq_gc_set_wake(struct irq_data *d, unsigned int on)
{
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        u32 mask = d->mask;

        if (!(mask & gc->wake_enabled))
                return -EINVAL;

        irq_gc_lock(gc);
        if (on)
                gc->wake_active |= mask;
        else
                gc->wake_active &= ~mask;
        irq_gc_unlock(gc);
        return 0;
}

static u32 irq_readl_be(void __iomem *addr)
{
        return ioread32be(addr);
}

static void irq_writel_be(u32 val, void __iomem *addr)
{
        iowrite32be(val, addr);
}

static void
irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
                      int num_ct, unsigned int irq_base,
                      void __iomem *reg_base, irq_flow_handler_t handler)
{
        raw_spin_lock_init(&gc->lock);
        gc->num_ct = num_ct;
        gc->irq_base = irq_base;
        gc->reg_base = reg_base;
        gc->chip_types->chip.name = name;
        gc->chip_types->handler = handler;
}

/**
 * irq_alloc_generic_chip - Allocate a generic chip and initialize it
 * @name:      Name of the irq chip
 * @num_ct:    Number of irq_chip_type instances associated with this chip
 * @irq_base:  Interrupt base nr for this chip
 * @reg_base:  Register base address (virtual)
 * @handler:   Default flow handler associated with this chip
 *
 * Returns an initialized irq_chip_generic structure. The chip defaults
 * to the primary (index 0) irq_chip_type and @handler.
 */
struct irq_chip_generic *
irq_alloc_generic_chip(const char *name, int num_ct, unsigned int irq_base,
                       void __iomem *reg_base, irq_flow_handler_t handler)
{
        struct irq_chip_generic *gc;
        unsigned long sz = sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);

        gc = kzalloc(sz, GFP_KERNEL);
        if (gc) {
                irq_init_generic_chip(gc, name, num_ct, irq_base, reg_base,
                                      handler);
        }
        return gc;
}
EXPORT_SYMBOL_GPL(irq_alloc_generic_chip);

static void
irq_gc_init_mask_cache(struct irq_chip_generic *gc, enum irq_gc_flags flags)
{
        struct irq_chip_type *ct = gc->chip_types;
        u32 *mskptr = &gc->mask_cache, mskreg = ct->regs.mask;
        int i;

        for (i = 0; i < gc->num_ct; i++) {
                if (flags & IRQ_GC_MASK_CACHE_PER_TYPE) {
                        mskptr = &ct[i].mask_cache_priv;
                        mskreg = ct[i].regs.mask;
                }
                ct[i].mask_cache = mskptr;
                if (flags & IRQ_GC_INIT_MASK_CACHE)
                        *mskptr = irq_reg_readl(gc, mskreg);
        }
}

/**
 * irq_alloc_domain_generic_chips - Allocate generic chips for an irq domain
 * @d:             irq domain for which to allocate chips
 * @irqs_per_chip: Number of interrupts each chip handles (max 32)
 * @num_ct:        Number of irq_chip_type instances associated with each chip
 * @name:          Name of the irq chip
 * @handler:       Default flow handler associated with these chips
 * @clr:           IRQ_* bits to clear in the mapping function
 * @set:           IRQ_* bits to set in the mapping function
 * @gcflags:       Generic chip specific setup flags
 */
int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
                                   int num_ct, const char *name,
                                   irq_flow_handler_t handler,
                                   unsigned int clr, unsigned int set,
                                   enum irq_gc_flags gcflags)
{
        struct irq_domain_chip_generic *dgc;
        struct irq_chip_generic *gc;
        int numchips, sz, i;
        unsigned long flags;
        void *tmp;

        if (d->gc)
                return -EBUSY;

        numchips = DIV_ROUND_UP(d->revmap_size, irqs_per_chip);
        if (!numchips)
                return -EINVAL;

        /* Allocate a pointer, generic chip and chiptypes for each chip */
        sz = sizeof(*dgc) + numchips * sizeof(gc);
        sz += numchips * (sizeof(*gc) + num_ct * sizeof(struct irq_chip_type));

        tmp = dgc = kzalloc(sz, GFP_KERNEL);
        if (!dgc)
                return -ENOMEM;
        dgc->irqs_per_chip = irqs_per_chip;
        dgc->num_chips = numchips;
        dgc->irq_flags_to_set = set;
        dgc->irq_flags_to_clear = clr;
        dgc->gc_flags = gcflags;
        d->gc = dgc;

        /* Calc pointer to the first generic chip */
        tmp += sizeof(*dgc) + numchips * sizeof(gc);
        for (i = 0; i < numchips; i++) {
                /* Store the pointer to the generic chip */
                dgc->gc[i] = gc = tmp;
                irq_init_generic_chip(gc, name, num_ct, i * irqs_per_chip,
                                      NULL, handler);

                gc->domain = d;
                if (gcflags & IRQ_GC_BE_IO) {
                        gc->reg_readl = &irq_readl_be;
                        gc->reg_writel = &irq_writel_be;
                }

                raw_spin_lock_irqsave(&gc_lock, flags);
                list_add_tail(&gc->list, &gc_list);
                raw_spin_unlock_irqrestore(&gc_lock, flags);

                /* Calc pointer to the next generic chip */
                tmp += sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
        }
        d->name = name;
        return 0;
}
EXPORT_SYMBOL_GPL(irq_alloc_domain_generic_chips);
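
/*
 * Usage sketch (illustrative only, not part of this library): a typical
 * driver creates a linear irq domain, attaches generic chips to it and
 * then fills in the register layout and callbacks of the primary
 * irq_chip_type before the first interrupt gets mapped. The names "np",
 * "base" and the FOO_INTC_* register offsets below are assumptions made
 * up for the example; error handling is reduced to the essentials.
 *
 *	struct irq_chip_generic *gc;
 *	struct irq_domain *domain;
 *	int ret;
 *
 *	domain = irq_domain_add_linear(np, 32, &irq_generic_chip_ops, NULL);
 *	if (!domain)
 *		return -ENOMEM;
 *	ret = irq_alloc_domain_generic_chips(domain, 32, 1, "foo_intc",
 *					     handle_level_irq, 0, 0,
 *					     IRQ_GC_INIT_MASK_CACHE);
 *	if (ret)
 *		return ret;
 *	gc = irq_get_domain_generic_chip(domain, 0);
 *	gc->reg_base = base;
 *	gc->chip_types[0].regs.mask = FOO_INTC_MASK;
 *	gc->chip_types[0].regs.ack = FOO_INTC_ACK;
 *	gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
 *	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
 *	gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
 */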

/**
 * irq_get_domain_generic_chip - Get a pointer to the generic chip of a hw_irq
 * @d:      irq domain pointer
 * @hw_irq: Hardware interrupt number
 */
struct irq_chip_generic *
irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
{
        struct irq_domain_chip_generic *dgc = d->gc;
        int idx;

        if (!dgc)
                return NULL;
        idx = hw_irq / dgc->irqs_per_chip;
        if (idx >= dgc->num_chips)
                return NULL;
        return dgc->gc[idx];
}
EXPORT_SYMBOL_GPL(irq_get_domain_generic_chip);

/*
 * Separate lockdep class for interrupt chip which can nest irq_desc
 * lock.
 */
static struct lock_class_key irq_nested_lock_class;

/*
 * irq_map_generic_chip - Map a generic chip for an irq domain
 */
int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
                         irq_hw_number_t hw_irq)
{
        struct irq_data *data = irq_domain_get_irq_data(d, virq);
        struct irq_domain_chip_generic *dgc = d->gc;
        struct irq_chip_generic *gc;
        struct irq_chip_type *ct;
        struct irq_chip *chip;
        unsigned long flags;
        int idx;

        if (!d->gc)
                return -ENODEV;

        idx = hw_irq / dgc->irqs_per_chip;
        if (idx >= dgc->num_chips)
                return -EINVAL;
        gc = dgc->gc[idx];

        idx = hw_irq % dgc->irqs_per_chip;

        if (test_bit(idx, &gc->unused))
                return -ENOTSUPP;

        if (test_bit(idx, &gc->installed))
                return -EBUSY;

        ct = gc->chip_types;
        chip = &ct->chip;

        /* We only init the cache for the first mapping of a generic chip */
        if (!gc->installed) {
                raw_spin_lock_irqsave(&gc->lock, flags);
                irq_gc_init_mask_cache(gc, dgc->gc_flags);
                raw_spin_unlock_irqrestore(&gc->lock, flags);
        }

        /* Mark the interrupt as installed */
        set_bit(idx, &gc->installed);

        if (dgc->gc_flags & IRQ_GC_INIT_NESTED_LOCK)
                irq_set_lockdep_class(virq, &irq_nested_lock_class);

        if (chip->irq_calc_mask)
                chip->irq_calc_mask(data);
        else
                data->mask = 1 << idx;

        irq_domain_set_info(d, virq, hw_irq, chip, gc, ct->handler, NULL, NULL);
        irq_modify_status(virq, dgc->irq_flags_to_clear, dgc->irq_flags_to_set);
        return 0;
}
EXPORT_SYMBOL_GPL(irq_map_generic_chip);

struct irq_domain_ops irq_generic_chip_ops = {
        .map = irq_map_generic_chip,
        .xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_generic_chip_ops);

/**
 * irq_setup_generic_chip - Setup a range of interrupts with a generic chip
 * @gc:    Generic irq chip holding all data
 * @msk:   Bitmask holding the irqs to initialize relative to gc->irq_base
 * @flags: Flags for initialization
 * @clr:   IRQ_* bits to clear
 * @set:   IRQ_* bits to set
 *
 * Set up max. 32 interrupts starting from gc->irq_base. Note, this
 * initializes all interrupts to the primary irq_chip_type and its
 * associated handler.
 */
void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
                            enum irq_gc_flags flags, unsigned int clr,
                            unsigned int set)
{
        struct irq_chip_type *ct = gc->chip_types;
        struct irq_chip *chip = &ct->chip;
        unsigned int i;

        raw_spin_lock(&gc_lock);
        list_add_tail(&gc->list, &gc_list);
        raw_spin_unlock(&gc_lock);

        irq_gc_init_mask_cache(gc, flags);

        for (i = gc->irq_base; msk; msk >>= 1, i++) {
                if (!(msk & 0x01))
                        continue;

                if (flags & IRQ_GC_INIT_NESTED_LOCK)
                        irq_set_lockdep_class(i, &irq_nested_lock_class);

                if (!(flags & IRQ_GC_NO_MASK)) {
                        struct irq_data *d = irq_get_irq_data(i);

                        if (chip->irq_calc_mask)
                                chip->irq_calc_mask(d);
                        else
                                d->mask = 1 << (i - gc->irq_base);
                }
                irq_set_chip_and_handler(i, chip, ct->handler);
                irq_set_chip_data(i, gc);
                irq_modify_status(i, clr, set);
        }
        gc->irq_cnt = i - gc->irq_base;
}
EXPORT_SYMBOL_GPL(irq_setup_generic_chip);
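
/*
 * Usage sketch (illustrative only): the non-domain path allocates a chip
 * for a fixed Linux irq range and wires up the primary chip type before
 * installing it for the interrupts selected by the mask. "foo", irq_base,
 * reg_base and FOO_MASK_REG stand in for driver specific values.
 *
 *	struct irq_chip_generic *gc;
 *
 *	gc = irq_alloc_generic_chip("foo", 1, irq_base, reg_base,
 *				    handle_level_irq);
 *	if (!gc)
 *		return -ENOMEM;
 *	gc->chip_types[0].regs.mask = FOO_MASK_REG;
 *	gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
 *	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
 *	irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE,
 *			       IRQ_NOREQUEST, 0);
 */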

/**
 * irq_setup_alt_chip - Switch to alternative chip
 * @d:    irq_data for this interrupt
 * @type: Flow type to be initialized
 *
 * Only to be called from chip->irq_set_type() callbacks.
 */
int irq_setup_alt_chip(struct irq_data *d, unsigned int type)
{
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        struct irq_chip_type *ct = gc->chip_types;
        unsigned int i;

        for (i = 0; i < gc->num_ct; i++, ct++) {
                if (ct->type & type) {
                        d->chip = &ct->chip;
                        irq_data_to_desc(d)->handle_irq = ct->handler;
                        return 0;
                }
        }
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(irq_setup_alt_chip);
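
/*
 * Usage sketch (illustrative only): a driver that registers two
 * irq_chip_type entries, e.g. one with ->type = IRQ_TYPE_EDGE_BOTH and
 * one with ->type = IRQ_TYPE_LEVEL_MASK, can switch between them from
 * its irq_set_type() callback. foo_irq_set_type() is a made-up name.
 *
 *	static int foo_irq_set_type(struct irq_data *d, unsigned int type)
 *	{
 *		return irq_setup_alt_chip(d, type);
 *	}
 */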

/**
 * irq_remove_generic_chip - Remove a chip
 * @gc:  Generic irq chip holding all data
 * @msk: Bitmask holding the irqs to remove relative to gc->irq_base
 * @clr: IRQ_* bits to clear
 * @set: IRQ_* bits to set
 *
 * Remove up to 32 interrupts starting from gc->irq_base.
 */
void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
                             unsigned int clr, unsigned int set)
{
        unsigned int i = gc->irq_base;

        raw_spin_lock(&gc_lock);
        list_del(&gc->list);
        raw_spin_unlock(&gc_lock);

        for (; msk; msk >>= 1, i++) {
                if (!(msk & 0x01))
                        continue;

                /* Remove handler first. That will mask the irq line */
                irq_set_handler(i, NULL);
                irq_set_chip(i, &no_irq_chip);
                irq_set_chip_data(i, NULL);
                irq_modify_status(i, clr, set);
        }
}
EXPORT_SYMBOL_GPL(irq_remove_generic_chip);

static struct irq_data *irq_gc_get_irq_data(struct irq_chip_generic *gc)
{
        unsigned int virq;

        if (!gc->domain)
                return irq_get_irq_data(gc->irq_base);

        /*
         * We don't know which of the irqs has been actually
         * installed. Use the first one.
         */
        if (!gc->installed)
                return NULL;

        virq = irq_find_mapping(gc->domain, gc->irq_base + __ffs(gc->installed));
        return virq ? irq_get_irq_data(virq) : NULL;
}

#ifdef CONFIG_PM
static int irq_gc_suspend(void)
{
        struct irq_chip_generic *gc;

        list_for_each_entry(gc, &gc_list, list) {
                struct irq_chip_type *ct = gc->chip_types;

                if (ct->chip.irq_suspend) {
                        struct irq_data *data = irq_gc_get_irq_data(gc);

                        if (data)
                                ct->chip.irq_suspend(data);
                }

                if (gc->suspend)
                        gc->suspend(gc);
        }
        return 0;
}

static void irq_gc_resume(void)
{
        struct irq_chip_generic *gc;

        list_for_each_entry(gc, &gc_list, list) {
                struct irq_chip_type *ct = gc->chip_types;

                if (gc->resume)
                        gc->resume(gc);

                if (ct->chip.irq_resume) {
                        struct irq_data *data = irq_gc_get_irq_data(gc);

                        if (data)
                                ct->chip.irq_resume(data);
                }
        }
}
#else
#define irq_gc_suspend NULL
#define irq_gc_resume NULL
#endif

static void irq_gc_shutdown(void)
{
        struct irq_chip_generic *gc;

        list_for_each_entry(gc, &gc_list, list) {
                struct irq_chip_type *ct = gc->chip_types;

                if (ct->chip.irq_pm_shutdown) {
                        struct irq_data *data = irq_gc_get_irq_data(gc);

                        if (data)
                                ct->chip.irq_pm_shutdown(data);
                }
        }
}

static struct syscore_ops irq_gc_syscore_ops = {
        .suspend = irq_gc_suspend,
        .resume = irq_gc_resume,
        .shutdown = irq_gc_shutdown,
};

static int __init irq_gc_init_ops(void)
{
        register_syscore_ops(&irq_gc_syscore_ops);
        return 0;
}
device_initcall(irq_gc_init_ops);