1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/interrupt.h>
14#include <linux/irqchip/metag-ext.h>
15#include <linux/irqdomain.h>
16#include <linux/io.h>
17#include <linux/of.h>
18#include <linux/slab.h>
19#include <linux/syscore_ops.h>
20
21#include <asm/irq.h>
22#include <asm/hwthread.h>
23
24#define HWSTAT_STRIDE 8
25#define HWVEC_BLK_STRIDE 0x1000
26
27
28
29
30
31
32
33
/**
 * struct meta_intc_priv - private meta external interrupt data
 * @nr_banks:		Number of interrupt banks (32 IRQs per bank, max 4)
 * @domain:		IRQ domain mapping hardware IRQ numbers to Linux IRQs
 * @unmasked:		Bitmap per bank of IRQs recorded as unmasked; the demux
 *			handler ANDs this with HWSTATEXT so only IRQs this OS
 *			has unmasked are serviced
 * @levels_altered:	Bitmap per bank of level (HWLEVELEXT) bits this driver
 *			has changed, so only those are restored on resume
 */
struct meta_intc_priv {
	unsigned int nr_banks;
	struct irq_domain *domain;

	unsigned long unmasked[4];

#ifdef CONFIG_METAG_SUSPEND_MEM
	unsigned long levels_altered[4];
#endif
};

/* Private data for the one and only external interrupt controller */
static struct meta_intc_priv meta_intc_priv;
47
48
49
50
51
52
53
54static unsigned int meta_intc_offset(irq_hw_number_t hw)
55{
56 return hw & 0x1f;
57}
58
59
60
61
62
63
64
65static unsigned int meta_intc_bank(irq_hw_number_t hw)
66{
67 return hw >> 5;
68}
69
70
71
72
73
74
75
76
77static void __iomem *meta_intc_stat_addr(irq_hw_number_t hw)
78{
79 return (void __iomem *)(HWSTATEXT +
80 HWSTAT_STRIDE * meta_intc_bank(hw));
81}
82
83
84
85
86
87
88
89
90static void __iomem *meta_intc_level_addr(irq_hw_number_t hw)
91{
92 return (void __iomem *)(HWLEVELEXT +
93 HWSTAT_STRIDE * meta_intc_bank(hw));
94}
95
96
97
98
99
100
101
102
103static void __iomem *meta_intc_mask_addr(irq_hw_number_t hw)
104{
105 return (void __iomem *)(HWMASKEXT +
106 HWSTAT_STRIDE * meta_intc_bank(hw));
107}
108
109
110
111
112
113
114
115
116static inline void __iomem *meta_intc_vec_addr(irq_hw_number_t hw)
117{
118 return (void __iomem *)(HWVEC0EXT +
119 HWVEC_BLK_STRIDE * meta_intc_bank(hw) +
120 HWVECnEXT_STRIDE * meta_intc_offset(hw));
121}
122
123
124
125
126
127
128
129
/**
 * meta_intc_startup_irq() - set up an external irq
 * @data:	data for the external irq to start up
 *
 * Routes the interrupt onto this hardware thread's TR2 trigger, acking any
 * stale pending state first, then unmasks it. The ack/vector/unmask order
 * avoids delivering a stale event through the newly set vector.
 *
 * Returns:	0 (no pending state is reported).
 */
static unsigned int meta_intc_startup_irq(struct irq_data *data)
{
	irq_hw_number_t hw = data->hwirq;
	void __iomem *vec_addr = meta_intc_vec_addr(hw);
	int thread = hard_processor_id();

	/* Perform any chip-specific acking first (edge chip clears status) */
	if (data->chip->irq_ack)
		data->chip->irq_ack(data);

	/* Wire up this interrupt to this thread's TR2 trigger vector */
	metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);

	/* Finally allow the interrupt through */
	data->chip->irq_unmask(data);

	return 0;
}
148
149
150
151
152
153
154
/**
 * meta_intc_shutdown_irq() - turn off an external irq
 * @data:	data for the external irq to turn off
 *
 * Masks the interrupt via the chip's mask callback and then clears its
 * HWVECxEXT trigger vector (writing 0) so the interrupt can no longer be
 * delivered to any hardware thread.
 */
static void meta_intc_shutdown_irq(struct irq_data *data)
{
	irq_hw_number_t hw = data->hwirq;
	void __iomem *vec_addr = meta_intc_vec_addr(hw);

	/* Mask the interrupt */
	data->chip->irq_mask(data);

	/*
	 * Disconnect the interrupt from its trigger vector so it cannot
	 * reach any thread even if the mask is ineffective (no-mask SoCs).
	 */
	metag_out32(0, vec_addr);
}
169
170
171
172
173
174
175
/**
 * meta_intc_ack_irq() - acknowledge an external irq
 * @data:	data for the external irq to ack
 *
 * Clears a pending (edge) interrupt by writing its bit back to the
 * HWSTATEXT status register. The write is skipped when the bit is not
 * currently set, to avoid touching hardware state unnecessarily.
 */
static void meta_intc_ack_irq(struct irq_data *data)
{
	irq_hw_number_t hw = data->hwirq;
	unsigned int bit = 1 << meta_intc_offset(hw);
	void __iomem *stat_addr = meta_intc_stat_addr(hw);

	/* Write the status bit back to itself to clear it, if pending */
	if (metag_in32(stat_addr) & bit)
		metag_out32(bit, stat_addr);
}
188
189
190
191
192
193
194
195
196
197static void record_irq_is_masked(struct irq_data *data)
198{
199 struct meta_intc_priv *priv = &meta_intc_priv;
200 irq_hw_number_t hw = data->hwirq;
201
202 clear_bit(meta_intc_offset(hw), &priv->unmasked[meta_intc_bank(hw)]);
203}
204
205
206
207
208
209
210
211
212
213static void record_irq_is_unmasked(struct irq_data *data)
214{
215 struct meta_intc_priv *priv = &meta_intc_priv;
216 irq_hw_number_t hw = data->hwirq;
217
218 set_bit(meta_intc_offset(hw), &priv->unmasked[meta_intc_bank(hw)]);
219}
220
221
222
223
224
225
226
227
228
229
230
231
232
233
/**
 * meta_intc_mask_irq_simple() - mask an irq without touching hardware
 * @data:	data for the external irq being masked
 *
 * Only records the masked state in the unmasked[] bitmap; no HWMASKEXT
 * write is performed. Non-static, so it can be installed as an irq_chip
 * callback from outside this file.
 */
void meta_intc_mask_irq_simple(struct irq_data *data)
{
	record_irq_is_masked(data);
}
238
239
240
241
242
243
244
245
246
247
/**
 * meta_intc_unmask_irq_simple() - unmask an irq without touching hardware
 * @data:	data for the external irq being unmasked
 *
 * Only records the unmasked state in the unmasked[] bitmap; no HWMASKEXT
 * write is performed. Non-static, so it can be installed as an irq_chip
 * callback from outside this file.
 */
void meta_intc_unmask_irq_simple(struct irq_data *data)
{
	record_irq_is_unmasked(data);
}
252
253
254
255
256
257
258
259
260
261
262
263
264
265static void meta_intc_mask_irq(struct irq_data *data)
266{
267 irq_hw_number_t hw = data->hwirq;
268 unsigned int bit = 1 << meta_intc_offset(hw);
269 void __iomem *mask_addr = meta_intc_mask_addr(hw);
270 unsigned long flags;
271
272 record_irq_is_masked(data);
273
274
275 __global_lock2(flags);
276 metag_out32(metag_in32(mask_addr) & ~bit, mask_addr);
277 __global_unlock2(flags);
278}
279
280
281
282
283
284
285
286
287
288static void meta_intc_unmask_irq(struct irq_data *data)
289{
290 irq_hw_number_t hw = data->hwirq;
291 unsigned int bit = 1 << meta_intc_offset(hw);
292 void __iomem *mask_addr = meta_intc_mask_addr(hw);
293 unsigned long flags;
294
295 record_irq_is_unmasked(data);
296
297
298 __global_lock2(flags);
299 metag_out32(metag_in32(mask_addr) | bit, mask_addr);
300 __global_unlock2(flags);
301}
302
303
304
305
306
307
308
309
310
311static void meta_intc_mask_irq_nomask(struct irq_data *data)
312{
313 irq_hw_number_t hw = data->hwirq;
314 void __iomem *vec_addr = meta_intc_vec_addr(hw);
315
316 record_irq_is_masked(data);
317
318
319 metag_out32(0, vec_addr);
320}
321
322
323
324
325
326
327
328
329
330
331
/**
 * meta_intc_unmask_edge_irq_nomask() - unmask an edge irq without HWMASKEXT
 * @data:	data for the external irq to unmask
 *
 * Counterpart of meta_intc_mask_irq_nomask() for edge interrupts: the irq
 * is "unmasked" by restoring its trigger vector to this thread's TR2
 * signal. Any edge that arrived while the vector was cleared would have
 * latched in HWSTATEXT without triggering, so it must be replayed here.
 */
static void meta_intc_unmask_edge_irq_nomask(struct irq_data *data)
{
	irq_hw_number_t hw = data->hwirq;
	unsigned int bit = 1 << meta_intc_offset(hw);
	void __iomem *stat_addr = meta_intc_stat_addr(hw);
	void __iomem *vec_addr = meta_intc_vec_addr(hw);
	unsigned int thread = hard_processor_id();

	record_irq_is_unmasked(data);

	/* Re-vector the irq onto this thread's TR2 trigger */
	metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);

	/*
	 * Replay a latched edge: the first status write acks the pending
	 * bit, then the loop keeps writing the bit until it reads back as
	 * set again, i.e. until the hardware has re-raised the edge through
	 * the now-valid vector.
	 *
	 * NOTE(review): the exact HWSTATEXT write semantics (ack vs
	 * re-trigger) are inferred from this usage — confirm against the
	 * Meta external interrupt block documentation.
	 */
	if (metag_in32(stat_addr) & bit) {
		metag_out32(bit, stat_addr);
		while (!(metag_in32(stat_addr) & bit))
			metag_out32(bit, stat_addr);
	}
}
362
363
364
365
366
367
368
369
370
371
372
/**
 * meta_intc_unmask_level_irq_nomask() - unmask a level irq without HWMASKEXT
 * @data:	data for the external irq to unmask
 *
 * Counterpart of meta_intc_mask_irq_nomask() for level interrupts: restore
 * the irq's trigger vector to this thread's TR2 signal, then if the line is
 * still asserted write the status bit so the interrupt fires through the
 * newly restored vector rather than staying silently pending.
 */
static void meta_intc_unmask_level_irq_nomask(struct irq_data *data)
{
	irq_hw_number_t hw = data->hwirq;
	unsigned int bit = 1 << meta_intc_offset(hw);
	void __iomem *stat_addr = meta_intc_stat_addr(hw);
	void __iomem *vec_addr = meta_intc_vec_addr(hw);
	unsigned int thread = hard_processor_id();

	record_irq_is_unmasked(data);

	/* Re-vector the irq onto this thread's TR2 trigger */
	metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);

	/* Kick a still-pending level interrupt through the new vector */
	if (metag_in32(stat_addr) & bit)
		metag_out32(bit, stat_addr);
}
391
392
393
394
395
396
397
398
399
400
401
/**
 * meta_intc_irq_set_type() - set the flow type of an external irq
 * @data:	data for the external irq whose type is being set
 * @flow_type:	new irq flow type (IRQ_TYPE_*)
 *
 * Switches the irq between the level and edge irq_chips/handlers, and
 * updates the corresponding bit in HWLEVELEXT (set = level sensitive,
 * clear = edge sensitive). Bits altered here are recorded so suspend code
 * restores only levels this driver changed.
 *
 * Returns:	0 (always succeeds).
 */
static int meta_intc_irq_set_type(struct irq_data *data, unsigned int flow_type)
{
#ifdef CONFIG_METAG_SUSPEND_MEM
	struct meta_intc_priv *priv = &meta_intc_priv;
#endif
	unsigned int irq = data->irq;
	irq_hw_number_t hw = data->hwirq;
	unsigned int bit = 1 << meta_intc_offset(hw);
	void __iomem *level_addr = meta_intc_level_addr(hw);
	unsigned long flags;
	unsigned int level;

	/* Set the chip and handler to match the requested sense */
	if (flow_type & IRQ_TYPE_LEVEL_MASK)
		__irq_set_chip_handler_name_locked(irq, &meta_intc_level_chip,
						   handle_level_irq, NULL);
	else
		__irq_set_chip_handler_name_locked(irq, &meta_intc_edge_chip,
						   handle_edge_irq, NULL);

	/* Update the HWLEVELEXT bit under the global inter-thread lock */
	__global_lock2(flags);
	level = metag_in32(level_addr);
	if (flow_type & IRQ_TYPE_LEVEL_MASK)
		level |= bit;
	else
		level &= ~bit;
	metag_out32(level, level_addr);
#ifdef CONFIG_METAG_SUSPEND_MEM
	/* Remember that this level bit needs restoring after suspend */
	priv->levels_altered[meta_intc_bank(hw)] |= bit;
#endif
	__global_unlock2(flags);

	return 0;
}
437
438
439
440
441
442
443
444
445
446
447
448
449
/**
 * meta_intc_irq_demux() - external irq de-multiplexer
 * @irq:	the virtual irq number of the chained TR2 interrupt
 * @desc:	the interrupt description structure for this irq
 *
 * Chained handler run when a TR2 trigger fires. Walks each bank's
 * HWSTATEXT register, filtered by the unmasked[] bitmap (so interrupts
 * masked by this OS, e.g. ones owned by other hardware threads, are
 * ignored), and hands each pending source to generic_handle_irq().
 */
static void meta_intc_irq_demux(unsigned int irq, struct irq_desc *desc)
{
	struct meta_intc_priv *priv = &meta_intc_priv;
	irq_hw_number_t hw;
	unsigned int bank, irq_no, status;
	void __iomem *stat_addr = meta_intc_stat_addr(0);

	/*
	 * Locate which interrupt(s) caused this handler to run, bank by
	 * bank.
	 */
	for (bank = 0; bank < priv->nr_banks; ++bank) {
		/* Which unmasked interrupts are pending in this bank? */
recalculate:
		status = metag_in32(stat_addr) & priv->unmasked[bank];

		for (hw = bank*32; status; status >>= 1, ++hw) {
			if (status & 0x1) {
				/* Map hardware irq number to a Linux irq */
				irq_no = irq_linear_revmap(priv->domain, hw);

				/* Service the interrupt */
				generic_handle_irq(irq_no);

				/*
				 * Handling the irq may have changed the
				 * pending/mask state (handlers can ack,
				 * mask, or raise further irqs), so re-read
				 * this bank's status from hardware rather
				 * than continuing with a stale snapshot.
				 */
				goto recalculate;
			}
		}
		/* Advance to the next bank's status register */
		stat_addr += HWSTAT_STRIDE;
	}
}
493
494#ifdef CONFIG_SMP
495
496
497
498
499
500
501
502
503
504static int meta_intc_set_affinity(struct irq_data *data,
505 const struct cpumask *cpumask, bool force)
506{
507 irq_hw_number_t hw = data->hwirq;
508 void __iomem *vec_addr = meta_intc_vec_addr(hw);
509 unsigned int cpu, thread;
510
511
512
513
514
515
516
517
518 cpu = cpumask_any(cpumask);
519 thread = cpu_2_hwthread_id[cpu];
520
521 metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
522
523 return 0;
524}
525#else
526#define meta_intc_set_affinity NULL
527#endif
528
529#ifdef CONFIG_PM_SLEEP
530#define META_INTC_CHIP_FLAGS (IRQCHIP_MASK_ON_SUSPEND \
531 | IRQCHIP_SKIP_SET_WAKE)
532#else
533#define META_INTC_CHIP_FLAGS 0
534#endif
535
536
537
/* External irq chip for edge-sensitive interrupts (needs irq_ack) */
struct irq_chip meta_intc_edge_chip = {
	.irq_startup = meta_intc_startup_irq,
	.irq_shutdown = meta_intc_shutdown_irq,
	.irq_ack = meta_intc_ack_irq,
	.irq_mask = meta_intc_mask_irq,
	.irq_unmask = meta_intc_unmask_irq,
	.irq_set_type = meta_intc_irq_set_type,
	.irq_set_affinity = meta_intc_set_affinity,
	.flags = META_INTC_CHIP_FLAGS,
};
548
/* External irq chip for level-sensitive interrupts (no irq_ack needed) */
struct irq_chip meta_intc_level_chip = {
	.irq_startup = meta_intc_startup_irq,
	.irq_shutdown = meta_intc_shutdown_irq,
	.irq_set_type = meta_intc_irq_set_type,
	.irq_mask = meta_intc_mask_irq,
	.irq_unmask = meta_intc_unmask_irq,
	.irq_set_affinity = meta_intc_set_affinity,
	.flags = META_INTC_CHIP_FLAGS,
};
558
559
560
561
562
563
564
565
566
567
568
569
570static int meta_intc_map(struct irq_domain *d, unsigned int irq,
571 irq_hw_number_t hw)
572{
573 unsigned int bit = 1 << meta_intc_offset(hw);
574 void __iomem *level_addr = meta_intc_level_addr(hw);
575
576
577 if (metag_in32(level_addr) & bit)
578 irq_set_chip_and_handler(irq, &meta_intc_level_chip,
579 handle_level_irq);
580 else
581 irq_set_chip_and_handler(irq, &meta_intc_edge_chip,
582 handle_edge_irq);
583 return 0;
584}
585
/* DT bindings use two cells per irq: hardware irq number and flags */
static const struct irq_domain_ops meta_intc_domain_ops = {
	.map = meta_intc_map,
	.xlate = irq_domain_xlate_twocell,
};
590
591#ifdef CONFIG_METAG_SUSPEND_MEM
592
593
594
595
596
597
598
599
600
601
/**
 * struct meta_intc_context - external interrupt register state for suspend
 * @levels:	Saved HWLEVELEXT registers, one per bank
 * @masks:	Saved HWMASKEXT registers, one per bank
 * @vectors:	Saved HWVECxEXT registers, one per hardware irq
 * @txvecint:	Saved TxVECINT registers, [thread][register]
 *
 * Holds the controller state captured by meta_intc_suspend() so that
 * meta_intc_resume() can restore it after suspend-to-RAM.
 */
struct meta_intc_context {
	u32 levels[4];
	u32 masks[4];
	u8 vectors[4*32];

	u8 txvecint[4][4];
};

/* Context saved by the last suspend; NULL when no suspend is in progress */
static struct meta_intc_context *meta_intc_context;
612
613
614
615
616
617
618
/**
 * meta_intc_suspend() - save external interrupt state before suspend
 *
 * Snapshots the controller registers (trigger vectors for every mapped,
 * in-use irq; altered level bits; masks for banks with in-use irqs) and
 * every thread's TxVECINT registers into an atomically allocated context
 * for meta_intc_resume() to restore.
 *
 * Returns:	0 on success, -ENOMEM if the context cannot be allocated.
 */
static int meta_intc_suspend(void)
{
	struct meta_intc_priv *priv = &meta_intc_priv;
	int i, j;
	irq_hw_number_t hw;
	unsigned int bank;
	unsigned long flags;
	struct meta_intc_context *context;
	void __iomem *level_addr, *mask_addr, *vec_addr;
	u32 mask, bit;

	context = kzalloc(sizeof(*context), GFP_ATOMIC);
	if (!context)
		return -ENOMEM;

	hw = 0;
	level_addr = meta_intc_level_addr(0);
	mask_addr = meta_intc_mask_addr(0);
	for (bank = 0; bank < priv->nr_banks; ++bank) {
		vec_addr = meta_intc_vec_addr(hw);

		/* Build a mask of the irqs in this bank that are in use */
		mask = 0;
		for (bit = 1; bit; bit <<= 1) {
			i = irq_linear_revmap(priv->domain, hw);
			/* Save mapped irqs which are enabled or have actions */
			if (i && (!irqd_irq_disabled(irq_get_irq_data(i)) ||
				  irq_has_action(i))) {
				mask |= bit;

				/* Save the trigger vector for this irq */
				context->vectors[hw] = metag_in32(vec_addr);
			}

			++hw;
			vec_addr += HWVECnEXT_STRIDE;
		}

		/* Only save levels this driver has altered */
		if (priv->levels_altered[bank])
			context->levels[bank] = metag_in32(level_addr);

		/* Only save the bank mask if any irqs in it are in use */
		if (mask)
			context->masks[bank] = metag_in32(mask_addr);

		level_addr += HWSTAT_STRIDE;
		mask_addr += HWSTAT_STRIDE;
	}

	/* Save the trigger vector setup of every hardware thread */
	__global_lock2(flags);
	for (i = 0; i < 4; ++i)
		for (j = 0; j < 4; ++j)
			context->txvecint[i][j] = metag_in32(T0VECINT_BHALT +
							     TnVECINT_STRIDE*i +
							     8*j);
	__global_unlock2(flags);

	meta_intc_context = context;
	return 0;
}
680
681
682
683
684
685
/**
 * meta_intc_resume() - restore external interrupt state after resume
 *
 * Mirror of meta_intc_suspend(): restores the saved trigger vectors for
 * in-use irqs, the saved mask and level bits (read-modify-write so bits
 * the snapshot did not cover are left untouched), and every thread's
 * TxVECINT registers, then frees the saved context.
 */
static void meta_intc_resume(void)
{
	struct meta_intc_priv *priv = &meta_intc_priv;
	int i, j;
	irq_hw_number_t hw;
	unsigned int bank;
	unsigned long flags;
	struct meta_intc_context *context = meta_intc_context;
	void __iomem *level_addr, *mask_addr, *vec_addr;
	u32 mask, bit, tmp;

	meta_intc_context = NULL;

	hw = 0;
	level_addr = meta_intc_level_addr(0);
	mask_addr = meta_intc_mask_addr(0);
	for (bank = 0; bank < priv->nr_banks; ++bank) {
		vec_addr = meta_intc_vec_addr(hw);

		/* Rebuild the mask of in-use irqs, as in suspend */
		mask = 0;
		for (bit = 1; bit; bit <<= 1) {
			i = irq_linear_revmap(priv->domain, hw);
			/* Restore mapped irqs, enabled or with actions */
			if (i && (!irqd_irq_disabled(irq_get_irq_data(i)) ||
				  irq_has_action(i))) {
				mask |= bit;

				/* Restore the saved trigger vector */
				metag_out32(context->vectors[hw], vec_addr);
			}

			++hw;
			vec_addr += HWVECnEXT_STRIDE;
		}

		if (mask) {
			/* Restore only the saved bits of the bank mask */
			__global_lock2(flags);
			tmp = metag_in32(mask_addr);
			tmp = (tmp & ~mask) | (context->masks[bank] & mask);
			metag_out32(tmp, mask_addr);
			__global_unlock2(flags);
		}

		mask = priv->levels_altered[bank];
		if (mask) {
			/* Restore only the level bits this driver altered */
			__global_lock2(flags);
			tmp = metag_in32(level_addr);
			tmp = (tmp & ~mask) | (context->levels[bank] & mask);
			metag_out32(tmp, level_addr);
			__global_unlock2(flags);
		}

		level_addr += HWSTAT_STRIDE;
		mask_addr += HWSTAT_STRIDE;
	}

	/* Restore the trigger vector setup of every hardware thread */
	__global_lock2(flags);
	for (i = 0; i < 4; ++i) {
		for (j = 0; j < 4; ++j) {
			metag_out32(context->txvecint[i][j],
				    T0VECINT_BHALT +
				    TnVECINT_STRIDE*i +
				    8*j);
		}
	}
	__global_unlock2(flags);

	kfree(context);
}
759
/* Suspend/resume hooks for saving controller state across suspend-to-RAM */
static struct syscore_ops meta_intc_syscore_ops = {
	.suspend = meta_intc_suspend,
	.resume = meta_intc_resume,
};
764
/* Register the suspend/resume hooks; @priv is currently unused here */
static void __init meta_intc_init_syscore_ops(struct meta_intc_priv *priv)
{
	register_syscore_ops(&meta_intc_syscore_ops);
}
769#else
770#define meta_intc_init_syscore_ops(priv) do {} while (0)
771#endif
772
773
774
775
776
777
778
779
780static void __init meta_intc_init_cpu(struct meta_intc_priv *priv, int cpu)
781{
782 unsigned int thread = cpu_2_hwthread_id[cpu];
783 unsigned int signum = TBID_SIGNUM_TR2(thread);
784 int irq = tbisig_map(signum);
785
786
787 irq_set_chained_handler(irq, meta_intc_irq_demux);
788 irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
789}
790
791
792
793
794
795
796
797
798
/**
 * meta_intc_no_mask() - switch to the no-mask irq_chip callbacks
 *
 * Called when the SoC has no HWMASKEXT registers (DT "no-mask" property):
 * replaces the mask/unmask callbacks of both chips with the variants that
 * emulate masking by clearing/restoring the irq's trigger vector.
 */
void __init meta_intc_no_mask(void)
{
	meta_intc_edge_chip.irq_mask = meta_intc_mask_irq_nomask;
	meta_intc_edge_chip.irq_unmask = meta_intc_unmask_edge_irq_nomask;
	meta_intc_level_chip.irq_mask = meta_intc_mask_irq_nomask;
	meta_intc_level_chip.irq_unmask = meta_intc_unmask_level_irq_nomask;
}
806
807
808
809
810
811
812
813int __init init_external_IRQ(void)
814{
815 struct meta_intc_priv *priv = &meta_intc_priv;
816 struct device_node *node;
817 int ret, cpu;
818 u32 val;
819 bool no_masks = false;
820
821 node = of_find_compatible_node(NULL, NULL, "img,meta-intc");
822 if (!node)
823 return -ENOENT;
824
825
826 ret = of_property_read_u32(node, "num-banks", &val);
827 if (ret) {
828 pr_err("meta-intc: No num-banks property found\n");
829 return ret;
830 }
831 if (val < 1 || val > 4) {
832 pr_err("meta-intc: num-banks (%u) out of range\n", val);
833 return -EINVAL;
834 }
835 priv->nr_banks = val;
836
837
838 if (of_get_property(node, "no-mask", NULL))
839 no_masks = true;
840
841
842 if (no_masks)
843 meta_intc_no_mask();
844
845
846
847
848
849
850 priv->domain = irq_domain_add_linear(node, priv->nr_banks*32,
851 &meta_intc_domain_ops, priv);
852 if (unlikely(!priv->domain)) {
853 pr_err("meta-intc: cannot add IRQ domain\n");
854 return -ENOMEM;
855 }
856
857
858 for_each_possible_cpu(cpu)
859 meta_intc_init_cpu(priv, cpu);
860
861
862 meta_intc_init_syscore_ops(priv);
863
864 pr_info("meta-intc: External IRQ controller initialised (%u IRQs)\n",
865 priv->nr_banks*32);
866
867 return 0;
868}
869