1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/interrupt.h>
14#include <linux/irqchip/metag-ext.h>
15#include <linux/irqdomain.h>
16#include <linux/io.h>
17#include <linux/of.h>
18#include <linux/slab.h>
19#include <linux/syscore_ops.h>
20
21#include <asm/irq.h>
22#include <asm/hwthread.h>
23
24#define HWSTAT_STRIDE 8
25#define HWVEC_BLK_STRIDE 0x1000
26
27
28
29
30
31
32
33
/*
 * struct meta_intc_priv - private data for the Meta external IRQ controller
 * @nr_banks:		Number of interrupt banks (1-4, from the "num-banks"
 *			device tree property).
 * @domain:		Linear IRQ domain covering all nr_banks*32 HW IRQs.
 * @unmasked:		Per-bank software record of which IRQs are unmasked,
 *			maintained by record_irq_is_(un)masked() and consulted
 *			by the demux handler.
 * @levels_altered:	Per-bank record of which level/edge sense bits have
 *			been changed at runtime, so suspend/resume only
 *			restores bits this driver actually touched.
 */
struct meta_intc_priv {
	unsigned int nr_banks;
	struct irq_domain *domain;

	unsigned long unmasked[4];

#ifdef CONFIG_METAG_SUSPEND_MEM
	unsigned long levels_altered[4];
#endif
};

/* Singleton instance; this controller exists at most once in the system. */
static struct meta_intc_priv meta_intc_priv;
47
48
49
50
51
52
53
54static unsigned int meta_intc_offset(irq_hw_number_t hw)
55{
56 return hw & 0x1f;
57}
58
59
60
61
62
63
64
65static unsigned int meta_intc_bank(irq_hw_number_t hw)
66{
67 return hw >> 5;
68}
69
70
71
72
73
74
75
76
/*
 * meta_intc_stat_addr() - io address of the HWSTATEXT status register
 *			   for @hw's bank.
 * @hw:	Hardware IRQ number.
 *
 * Per-bank status registers are HWSTAT_STRIDE bytes apart, starting at
 * HWSTATEXT.
 */
static void __iomem *meta_intc_stat_addr(irq_hw_number_t hw)
{
	return (void __iomem *)(HWSTATEXT +
				HWSTAT_STRIDE * meta_intc_bank(hw));
}
82
83
84
85
86
87
88
89
/*
 * meta_intc_level_addr() - io address of the HWLEVELEXT sense register
 *			    for @hw's bank.
 * @hw:	Hardware IRQ number.
 *
 * Per-bank level registers share the HWSTAT_STRIDE spacing, starting at
 * HWLEVELEXT.  A set bit selects level sense, clear selects edge (see
 * meta_intc_irq_set_type()).
 */
static void __iomem *meta_intc_level_addr(irq_hw_number_t hw)
{
	return (void __iomem *)(HWLEVELEXT +
				HWSTAT_STRIDE * meta_intc_bank(hw));
}
95
96
97
98
99
100
101
102
/*
 * meta_intc_mask_addr() - io address of the HWMASKEXT mask register
 *			   for @hw's bank.
 * @hw:	Hardware IRQ number.
 *
 * Per-bank mask registers share the HWSTAT_STRIDE spacing, starting at
 * HWMASKEXT.  A set bit unmasks the corresponding IRQ (see
 * meta_intc_unmask_irq()).
 */
static void __iomem *meta_intc_mask_addr(irq_hw_number_t hw)
{
	return (void __iomem *)(HWMASKEXT +
				HWSTAT_STRIDE * meta_intc_bank(hw));
}
108
109
110
111
112
113
114
115
/*
 * meta_intc_vec_addr() - io address of the vector register for @hw.
 * @hw:	Hardware IRQ number.
 *
 * Unlike status/mask/level registers there is one vector register per IRQ:
 * banks are HWVEC_BLK_STRIDE bytes apart and individual vectors within a
 * bank are HWVECnEXT_STRIDE bytes apart, starting at HWVEC0EXT.
 */
static inline void __iomem *meta_intc_vec_addr(irq_hw_number_t hw)
{
	return (void __iomem *)(HWVEC0EXT +
				HWVEC_BLK_STRIDE * meta_intc_bank(hw) +
				HWVECnEXT_STRIDE * meta_intc_offset(hw));
}
122
123
124
125
126
127
128
129
/*
 * meta_intc_startup_irq() - set up and enable an external IRQ.
 * @data:	IRQ data of the interrupt to start.
 *
 * Acks any stale pending state, routes the interrupt's vector register at
 * the TR2 trigger of the current hardware thread, then unmasks it via the
 * chip's irq_unmask callback.  Used as .irq_startup by both chips.
 *
 * Return: always 0.
 */
static unsigned int meta_intc_startup_irq(struct irq_data *data)
{
	irq_hw_number_t hw = data->hwirq;
	void __iomem *vec_addr = meta_intc_vec_addr(hw);
	int thread = hard_processor_id();

	/* Only the edge chip provides irq_ack; clear any latched status. */
	if (data->chip->irq_ack)
		data->chip->irq_ack(data);

	/* Point the IRQ's vector at this thread's TR2 trigger. */
	metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);

	/* Finally unmask (chip-specific: HWMASKEXT or vector-based). */
	data->chip->irq_unmask(data);

	return 0;
}
148
149
150
151
152
153
154
/*
 * meta_intc_shutdown_irq() - disable and tear down an external IRQ.
 * @data:	IRQ data of the interrupt to shut down.
 *
 * Masks the interrupt via the chip's irq_mask callback and then clears its
 * vector register so it no longer triggers anything.  Used as
 * .irq_shutdown by both chips.
 */
static void meta_intc_shutdown_irq(struct irq_data *data)
{
	irq_hw_number_t hw = data->hwirq;
	void __iomem *vec_addr = meta_intc_vec_addr(hw);

	/* Mask first so no trigger can occur while we clear the vector. */
	data->chip->irq_mask(data);

	/* Writing 0 detaches the IRQ from any trigger vector. */
	metag_out32(0, vec_addr);
}
169
170
171
172
173
174
175
176static void meta_intc_ack_irq(struct irq_data *data)
177{
178 irq_hw_number_t hw = data->hwirq;
179 unsigned int bit = 1 << meta_intc_offset(hw);
180 void __iomem *stat_addr = meta_intc_stat_addr(hw);
181
182
183
184
185 if (metag_in32(stat_addr) & bit)
186 metag_out32(bit, stat_addr);
187}
188
189
190
191
192
193
194
195
196
197static void record_irq_is_masked(struct irq_data *data)
198{
199 struct meta_intc_priv *priv = &meta_intc_priv;
200 irq_hw_number_t hw = data->hwirq;
201
202 clear_bit(meta_intc_offset(hw), &priv->unmasked[meta_intc_bank(hw)]);
203}
204
205
206
207
208
209
210
211
212
213static void record_irq_is_unmasked(struct irq_data *data)
214{
215 struct meta_intc_priv *priv = &meta_intc_priv;
216 irq_hw_number_t hw = data->hwirq;
217
218 set_bit(meta_intc_offset(hw), &priv->unmasked[meta_intc_bank(hw)]);
219}
220
221
222
223
224
225
226
227
228
229
230
231
232
233
/*
 * meta_intc_mask_irq_simple() - software-only mask of an external IRQ.
 * @data:	IRQ data of the interrupt to mask.
 *
 * Only updates the software unmasked bitmap; touches no hardware.  Exported
 * (non-static) so external code can install it as an irq_mask callback.
 */
void meta_intc_mask_irq_simple(struct irq_data *data)
{
	record_irq_is_masked(data);
}
238
239
240
241
242
243
244
245
246
247
/*
 * meta_intc_unmask_irq_simple() - software-only unmask of an external IRQ.
 * @data:	IRQ data of the interrupt to unmask.
 *
 * Only updates the software unmasked bitmap; touches no hardware.  Exported
 * (non-static) so external code can install it as an irq_unmask callback.
 */
void meta_intc_unmask_irq_simple(struct irq_data *data)
{
	record_irq_is_unmasked(data);
}
252
253
254
255
256
257
258
259
260
261
262
263
264
265static void meta_intc_mask_irq(struct irq_data *data)
266{
267 irq_hw_number_t hw = data->hwirq;
268 unsigned int bit = 1 << meta_intc_offset(hw);
269 void __iomem *mask_addr = meta_intc_mask_addr(hw);
270 unsigned long flags;
271
272 record_irq_is_masked(data);
273
274
275 __global_lock2(flags);
276 metag_out32(metag_in32(mask_addr) & ~bit, mask_addr);
277 __global_unlock2(flags);
278}
279
280
281
282
283
284
285
286
287
288static void meta_intc_unmask_irq(struct irq_data *data)
289{
290 irq_hw_number_t hw = data->hwirq;
291 unsigned int bit = 1 << meta_intc_offset(hw);
292 void __iomem *mask_addr = meta_intc_mask_addr(hw);
293 unsigned long flags;
294
295 record_irq_is_unmasked(data);
296
297
298 __global_lock2(flags);
299 metag_out32(metag_in32(mask_addr) | bit, mask_addr);
300 __global_unlock2(flags);
301}
302
303
304
305
306
307
308
309
310
/*
 * meta_intc_mask_irq_nomask() - mask an IRQ on hardware without HWMASKEXT.
 * @data:	IRQ data of the interrupt to mask.
 *
 * When no mask registers exist (see meta_intc_no_mask()), masking is
 * emulated by clearing the IRQ's vector register so it cannot trigger.
 */
static void meta_intc_mask_irq_nomask(struct irq_data *data)
{
	irq_hw_number_t hw = data->hwirq;
	void __iomem *vec_addr = meta_intc_vec_addr(hw);

	record_irq_is_masked(data);

	/* No mask register: detach the IRQ from its trigger vector instead. */
	metag_out32(0, vec_addr);
}
321
322
323
324
325
326
327
328
329
330
331
332static void meta_intc_unmask_edge_irq_nomask(struct irq_data *data)
333{
334 irq_hw_number_t hw = data->hwirq;
335 unsigned int bit = 1 << meta_intc_offset(hw);
336 void __iomem *stat_addr = meta_intc_stat_addr(hw);
337 void __iomem *vec_addr = meta_intc_vec_addr(hw);
338 unsigned int thread = hard_processor_id();
339
340 record_irq_is_unmasked(data);
341
342
343 metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
344
345
346
347
348
349
350
351
352
353
354
355
356 if (metag_in32(stat_addr) & bit) {
357 metag_out32(bit, stat_addr);
358 while (!(metag_in32(stat_addr) & bit))
359 metag_out32(bit, stat_addr);
360 }
361}
362
363
364
365
366
367
368
369
370
371
372
373static void meta_intc_unmask_level_irq_nomask(struct irq_data *data)
374{
375 irq_hw_number_t hw = data->hwirq;
376 unsigned int bit = 1 << meta_intc_offset(hw);
377 void __iomem *stat_addr = meta_intc_stat_addr(hw);
378 void __iomem *vec_addr = meta_intc_vec_addr(hw);
379 unsigned int thread = hard_processor_id();
380
381 record_irq_is_unmasked(data);
382
383
384 metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
385
386
387
388 if (metag_in32(stat_addr) & bit)
389 metag_out32(bit, stat_addr);
390}
391
392
393
394
395
396
397
398
399
400
401
402static int meta_intc_irq_set_type(struct irq_data *data, unsigned int flow_type)
403{
404#ifdef CONFIG_METAG_SUSPEND_MEM
405 struct meta_intc_priv *priv = &meta_intc_priv;
406#endif
407 irq_hw_number_t hw = data->hwirq;
408 unsigned int bit = 1 << meta_intc_offset(hw);
409 void __iomem *level_addr = meta_intc_level_addr(hw);
410 unsigned long flags;
411 unsigned int level;
412
413
414 if (flow_type & IRQ_TYPE_LEVEL_MASK)
415 irq_set_chip_handler_name_locked(data, &meta_intc_level_chip,
416 handle_level_irq, NULL);
417 else
418 irq_set_chip_handler_name_locked(data, &meta_intc_edge_chip,
419 handle_edge_irq, NULL);
420
421
422 __global_lock2(flags);
423 level = metag_in32(level_addr);
424 if (flow_type & IRQ_TYPE_LEVEL_MASK)
425 level |= bit;
426 else
427 level &= ~bit;
428 metag_out32(level, level_addr);
429#ifdef CONFIG_METAG_SUSPEND_MEM
430 priv->levels_altered[meta_intc_bank(hw)] |= bit;
431#endif
432 __global_unlock2(flags);
433
434 return 0;
435}
436
437
438
439
440
441
442
443
444
445
446
447
/*
 * meta_intc_irq_demux() - chained handler demuxing all external IRQs.
 * @desc:	IRQ descriptor of the TR2 trigger this controller is wired to.
 *
 * Walks every bank, reads its HWSTATEXT status masked by the software
 * unmasked bitmap, and hands each pending IRQ to generic_handle_irq().
 * After each handled IRQ the bank status is re-read (goto recalculate)
 * because the handler may have changed pending/mask state.
 */
static void meta_intc_irq_demux(struct irq_desc *desc)
{
	struct meta_intc_priv *priv = &meta_intc_priv;
	irq_hw_number_t hw;
	unsigned int bank, irq_no, status;
	void __iomem *stat_addr = meta_intc_stat_addr(0);

	/* Scan each bank's status register in turn. */
	for (bank = 0; bank < priv->nr_banks; ++bank) {

recalculate:
		/* Only consider IRQs we believe are unmasked. */
		status = metag_in32(stat_addr) & priv->unmasked[bank];

		for (hw = bank*32; status; status >>= 1, ++hw) {
			if (status & 0x1) {
				/*
				 * Map HW IRQ to Linux IRQ.
				 * NOTE(review): irq_linear_revmap() returns 0
				 * for unmapped hwirqs - presumably unmasked
				 * bits are always mapped; verify.
				 */
				irq_no = irq_linear_revmap(priv->domain, hw);

				/* Run the flow handler for this IRQ. */
				generic_handle_irq(irq_no);

				/*
				 * Handler may have altered pending state;
				 * re-read this bank from scratch.
				 */
				goto recalculate;
			}
		}
		stat_addr += HWSTAT_STRIDE;
	}
}
491
492#ifdef CONFIG_SMP
493
494
495
496
497
498
499
500
501
/*
 * meta_intc_set_affinity() - route an external IRQ to another CPU.
 * @data:	IRQ data of the interrupt to move.
 * @cpumask:	Mask of CPUs the IRQ may be routed to.
 * @force:	Unused.
 *
 * Picks one online CPU from @cpumask and rewrites the IRQ's vector
 * register to target that CPU's hardware thread's TR2 trigger.  The
 * vector register only addresses a single thread, so the IRQ is routed
 * to exactly one CPU of the mask.
 *
 * Return: always 0.
 */
static int meta_intc_set_affinity(struct irq_data *data,
				  const struct cpumask *cpumask, bool force)
{
	irq_hw_number_t hw = data->hwirq;
	void __iomem *vec_addr = meta_intc_vec_addr(hw);
	unsigned int cpu, thread;

	/* Choose any online CPU from the requested mask. */
	cpu = cpumask_any_and(cpumask, cpu_online_mask);
	thread = cpu_2_hwthread_id[cpu];

	/* Retarget the vector at that hardware thread's TR2 trigger. */
	metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);

	return 0;
}
523#else
524#define meta_intc_set_affinity NULL
525#endif
526
527#ifdef CONFIG_PM_SLEEP
528#define META_INTC_CHIP_FLAGS (IRQCHIP_MASK_ON_SUSPEND \
529 | IRQCHIP_SKIP_SET_WAKE)
530#else
531#define META_INTC_CHIP_FLAGS 0
532#endif
533
534
535
/* IRQ chip for edge-triggered external IRQs (provides irq_ack). */
struct irq_chip meta_intc_edge_chip = {
	.irq_startup = meta_intc_startup_irq,
	.irq_shutdown = meta_intc_shutdown_irq,
	.irq_ack = meta_intc_ack_irq,
	.irq_mask = meta_intc_mask_irq,
	.irq_unmask = meta_intc_unmask_irq,
	.irq_set_type = meta_intc_irq_set_type,
	.irq_set_affinity = meta_intc_set_affinity,
	.flags = META_INTC_CHIP_FLAGS,
};
546
/* IRQ chip for level-triggered external IRQs (no irq_ack needed). */
struct irq_chip meta_intc_level_chip = {
	.irq_startup = meta_intc_startup_irq,
	.irq_shutdown = meta_intc_shutdown_irq,
	.irq_set_type = meta_intc_irq_set_type,
	.irq_mask = meta_intc_mask_irq,
	.irq_unmask = meta_intc_unmask_irq,
	.irq_set_affinity = meta_intc_set_affinity,
	.flags = META_INTC_CHIP_FLAGS,
};
556
557
558
559
560
561
562
563
564
565
566
567
568static int meta_intc_map(struct irq_domain *d, unsigned int irq,
569 irq_hw_number_t hw)
570{
571 unsigned int bit = 1 << meta_intc_offset(hw);
572 void __iomem *level_addr = meta_intc_level_addr(hw);
573
574
575 if (metag_in32(level_addr) & bit)
576 irq_set_chip_and_handler(irq, &meta_intc_level_chip,
577 handle_level_irq);
578 else
579 irq_set_chip_and_handler(irq, &meta_intc_edge_chip,
580 handle_edge_irq);
581 return 0;
582}
583
/* Domain ops: standard two-cell DT translation plus our map callback. */
static const struct irq_domain_ops meta_intc_domain_ops = {
	.map = meta_intc_map,
	.xlate = irq_domain_xlate_twocell,
};
588
589#ifdef CONFIG_METAG_SUSPEND_MEM
590
591
592
593
594
595
596
597
598
599
/*
 * struct meta_intc_context - register state saved across suspend-to-memory.
 * @levels:	HWLEVELEXT contents per bank (only altered bits restored).
 * @masks:	HWMASKEXT contents per bank (only in-use bits restored).
 * @vectors:	Per-IRQ vector register values for mapped, in-use IRQs.
 *		NOTE(review): stored as u8 - presumably vector values fit in
 *		8 bits; confirm against the register layout.
 * @txvecint:	TXVECINT trigger state, 4 registers for each of 4 threads.
 */
struct meta_intc_context {
	u32 levels[4];
	u32 masks[4];
	u8 vectors[4*32];

	u8 txvecint[4][4];
};

/* Saved state, allocated by meta_intc_suspend() and freed on resume. */
static struct meta_intc_context *meta_intc_context;
610
611
612
613
614
615
616
/*
 * meta_intc_suspend() - syscore suspend: save controller register state.
 *
 * Allocates a meta_intc_context and records, for each bank: the vector
 * register of every mapped IRQ that is enabled or has an action, the
 * HWLEVELEXT value if any sense bits were altered at runtime, and the
 * HWMASKEXT value if any IRQs are in use.  Also snapshots the per-thread
 * TXVECINT registers under the global lock.
 *
 * Return: 0 on success, -ENOMEM if the context cannot be allocated.
 */
static int meta_intc_suspend(void)
{
	struct meta_intc_priv *priv = &meta_intc_priv;
	int i, j;
	irq_hw_number_t hw;
	unsigned int bank;
	unsigned long flags;
	struct meta_intc_context *context;
	void __iomem *level_addr, *mask_addr, *vec_addr;
	u32 mask, bit;

	/* GFP_ATOMIC: syscore suspend runs with interrupts disabled. */
	context = kzalloc(sizeof(*context), GFP_ATOMIC);
	if (!context)
		return -ENOMEM;

	hw = 0;
	level_addr = meta_intc_level_addr(0);
	mask_addr = meta_intc_mask_addr(0);
	for (bank = 0; bank < priv->nr_banks; ++bank) {
		vec_addr = meta_intc_vec_addr(hw);

		/* Build the set of in-use IRQs and save their vectors. */
		mask = 0;
		for (bit = 1; bit; bit <<= 1) {
			i = irq_linear_revmap(priv->domain, hw);

			/* In use = mapped and (enabled or has an action). */
			if (i && (!irqd_irq_disabled(irq_get_irq_data(i)) ||
				  irq_has_action(i))) {
				mask |= bit;

				context->vectors[hw] = metag_in32(vec_addr);
			}

			++hw;
			vec_addr += HWVECnEXT_STRIDE;
		}

		/* Only save sense bits this driver has actually changed. */
		if (priv->levels_altered[bank])
			context->levels[bank] = metag_in32(level_addr);

		/* Only save the mask register if any IRQs are in use. */
		if (mask)
			context->masks[bank] = metag_in32(mask_addr);

		level_addr += HWSTAT_STRIDE;
		mask_addr += HWSTAT_STRIDE;
	}

	/* Snapshot all 4x4 TXVECINT registers atomically. */
	__global_lock2(flags);
	for (i = 0; i < 4; ++i)
		for (j = 0; j < 4; ++j)
			context->txvecint[i][j] = metag_in32(T0VECINT_BHALT +
							     TnVECINT_STRIDE*i +
							     8*j);
	__global_unlock2(flags);

	meta_intc_context = context;
	return 0;
}
678
679
680
681
682
683
/*
 * meta_intc_resume() - syscore resume: restore controller register state.
 *
 * Mirror of meta_intc_suspend(): rewrites the vector register of every
 * in-use IRQ, merges saved mask and altered level bits back into
 * HWMASKEXT/HWLEVELEXT under the global lock, restores all TXVECINT
 * registers, and frees the saved context.
 */
static void meta_intc_resume(void)
{
	struct meta_intc_priv *priv = &meta_intc_priv;
	int i, j;
	irq_hw_number_t hw;
	unsigned int bank;
	unsigned long flags;
	struct meta_intc_context *context = meta_intc_context;
	void __iomem *level_addr, *mask_addr, *vec_addr;
	u32 mask, bit, tmp;

	meta_intc_context = NULL;

	hw = 0;
	level_addr = meta_intc_level_addr(0);
	mask_addr = meta_intc_mask_addr(0);
	for (bank = 0; bank < priv->nr_banks; ++bank) {
		vec_addr = meta_intc_vec_addr(hw);

		/* Rebuild the in-use set and restore saved vectors. */
		mask = 0;
		for (bit = 1; bit; bit <<= 1) {
			i = irq_linear_revmap(priv->domain, hw);

			/* Same in-use criteria as in meta_intc_suspend(). */
			if (i && (!irqd_irq_disabled(irq_get_irq_data(i)) ||
				  irq_has_action(i))) {
				mask |= bit;

				metag_out32(context->vectors[hw], vec_addr);
			}

			++hw;
			vec_addr += HWVECnEXT_STRIDE;
		}

		if (mask) {
			/* Merge only the in-use bits into the mask register. */
			__global_lock2(flags);
			tmp = metag_in32(mask_addr);
			tmp = (tmp & ~mask) | (context->masks[bank] & mask);
			metag_out32(tmp, mask_addr);
			__global_unlock2(flags);
		}

		mask = priv->levels_altered[bank];
		if (mask) {
			/* Merge only the altered sense bits back. */
			__global_lock2(flags);
			tmp = metag_in32(level_addr);
			tmp = (tmp & ~mask) | (context->levels[bank] & mask);
			metag_out32(tmp, level_addr);
			__global_unlock2(flags);
		}

		level_addr += HWSTAT_STRIDE;
		mask_addr += HWSTAT_STRIDE;
	}

	/* Restore all 4x4 TXVECINT registers atomically. */
	__global_lock2(flags);
	for (i = 0; i < 4; ++i) {
		for (j = 0; j < 4; ++j) {
			metag_out32(context->txvecint[i][j],
				    T0VECINT_BHALT +
				    TnVECINT_STRIDE*i +
				    8*j);
		}
	}
	__global_unlock2(flags);

	kfree(context);
}
757
/* Syscore hooks for suspend-to-memory register save/restore. */
static struct syscore_ops meta_intc_syscore_ops = {
	.suspend = meta_intc_suspend,
	.resume = meta_intc_resume,
};

/* Register the syscore ops; @priv is unused but kept for the stub macro. */
static void __init meta_intc_init_syscore_ops(struct meta_intc_priv *priv)
{
	register_syscore_ops(&meta_intc_syscore_ops);
}
767#else
768#define meta_intc_init_syscore_ops(priv) do {} while (0)
769#endif
770
771
772
773
774
775
776
777
/*
 * meta_intc_init_cpu() - hook the demux handler up on one CPU.
 * @priv:	Controller private data (unused here).
 * @cpu:	Logical CPU number to set up.
 *
 * Maps the CPU's hardware thread's TR2 signal to a Linux IRQ and installs
 * meta_intc_irq_demux() as its chained handler, with low-level sense.
 */
static void __init meta_intc_init_cpu(struct meta_intc_priv *priv, int cpu)
{
	unsigned int thread = cpu_2_hwthread_id[cpu];
	unsigned int signum = TBID_SIGNUM_TR2(thread);
	int irq = tbisig_map(signum);

	/* All external IRQs for this thread funnel through TR2. */
	irq_set_chained_handler(irq, meta_intc_irq_demux);
	irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
}
788
789
790
791
792
793
794
795
796
/*
 * meta_intc_no_mask() - switch to mask-register-free operation.
 *
 * Replaces the irq_mask/irq_unmask callbacks of both chips with the
 * *_nomask variants that emulate masking via the vector registers, for
 * hardware without HWMASKEXT registers (DT "no-mask" property).
 */
void __init meta_intc_no_mask(void)
{
	meta_intc_edge_chip.irq_mask = meta_intc_mask_irq_nomask;
	meta_intc_edge_chip.irq_unmask = meta_intc_unmask_edge_irq_nomask;
	meta_intc_level_chip.irq_mask = meta_intc_mask_irq_nomask;
	meta_intc_level_chip.irq_unmask = meta_intc_unmask_level_irq_nomask;
}
804
805
806
807
808
809
810
/*
 * init_external_IRQ() - probe and initialise the external IRQ controller.
 *
 * Looks up the "img,meta-intc" device tree node, validates its "num-banks"
 * property (1-4), optionally switches to no-mask operation, creates a
 * linear IRQ domain for all banks, and wires the demux handler up on every
 * possible CPU.
 *
 * Return: 0 on success, -ENOENT if no DT node, -EINVAL/-ENOMEM or a
 *	   property-read error code on failure.
 */
int __init init_external_IRQ(void)
{
	struct meta_intc_priv *priv = &meta_intc_priv;
	struct device_node *node;
	int ret, cpu;
	u32 val;
	bool no_masks = false;

	/*
	 * NOTE(review): of_find_compatible_node() returns @node with an
	 * elevated refcount and no path here calls of_node_put() - verify
	 * whether that is intentional for this boot-time singleton.
	 */
	node = of_find_compatible_node(NULL, NULL, "img,meta-intc");
	if (!node)
		return -ENOENT;

	/* Mandatory property: number of 32-IRQ banks. */
	ret = of_property_read_u32(node, "num-banks", &val);
	if (ret) {
		pr_err("meta-intc: No num-banks property found\n");
		return ret;
	}
	if (val < 1 || val > 4) {
		pr_err("meta-intc: num-banks (%u) out of range\n", val);
		return -EINVAL;
	}
	priv->nr_banks = val;

	/* Optional property: hardware has no HWMASKEXT registers. */
	if (of_get_property(node, "no-mask", NULL))
		no_masks = true;

	/* Must be done before any IRQ is mapped through the domain. */
	if (no_masks)
		meta_intc_no_mask();

	/* One linear domain covering every bank's 32 hardware IRQs. */
	priv->domain = irq_domain_add_linear(node, priv->nr_banks*32,
					     &meta_intc_domain_ops, priv);
	if (unlikely(!priv->domain)) {
		pr_err("meta-intc: cannot add IRQ domain\n");
		return -ENOMEM;
	}

	/* Chain the demux handler onto each CPU's TR2 trigger. */
	for_each_possible_cpu(cpu)
		meta_intc_init_cpu(priv, cpu);

	/* No-op unless CONFIG_METAG_SUSPEND_MEM is enabled. */
	meta_intc_init_syscore_ops(priv);

	pr_info("meta-intc: External IRQ controller initialised (%u IRQs)\n",
		priv->nr_banks*32);

	return 0;
}
867