1
2
3
4
5
6
7
8
9
10
11#include <linux/interrupt.h>
12#include <linux/irqdomain.h>
13#include <linux/irq.h>
14#include <linux/of.h>
15
16#include <asm/mxregs.h>
17
18#include "irqchip.h"
19
/* Number of IPI hwirqs at the bottom of the hwirq number space. */
#define HW_IRQ_IPI_COUNT 2
/* First hwirq that is routed through the MX interrupt distributor. */
#define HW_IRQ_MX_BASE 2
/* Offset added when translating a DT intspec to an external hwirq. */
#define HW_IRQ_EXTERN_BASE 3

/*
 * Per-CPU software shadow of the INTENABLE special register; updated
 * together with every set_sr(..., intenable) in this file so mask/unmask
 * can read-modify-write without touching the hardware register twice.
 */
static DEFINE_PER_CPU(unsigned int, cached_irq_mask);
25
26static int xtensa_mx_irq_map(struct irq_domain *d, unsigned int irq,
27 irq_hw_number_t hw)
28{
29 if (hw < HW_IRQ_IPI_COUNT) {
30 struct irq_chip *irq_chip = d->host_data;
31 irq_set_chip_and_handler_name(irq, irq_chip,
32 handle_percpu_irq, "ipi");
33 irq_set_status_flags(irq, IRQ_LEVEL);
34 return 0;
35 }
36 return xtensa_irq_map(d, irq, hw);
37}
38
39
40
41
42
43
44
45static int xtensa_mx_irq_domain_xlate(struct irq_domain *d,
46 struct device_node *ctrlr,
47 const u32 *intspec, unsigned int intsize,
48 unsigned long *out_hwirq, unsigned int *out_type)
49{
50 return xtensa_irq_domain_xlate(intspec, intsize,
51 intspec[0], intspec[0] + HW_IRQ_EXTERN_BASE,
52 out_hwirq, out_type);
53}
54
55static const struct irq_domain_ops xtensa_mx_irq_domain_ops = {
56 .xlate = xtensa_mx_irq_domain_xlate,
57 .map = xtensa_mx_irq_map,
58};
59
60void secondary_init_irq(void)
61{
62 __this_cpu_write(cached_irq_mask,
63 XCHAL_INTTYPE_MASK_EXTERN_EDGE |
64 XCHAL_INTTYPE_MASK_EXTERN_LEVEL);
65 set_sr(XCHAL_INTTYPE_MASK_EXTERN_EDGE |
66 XCHAL_INTTYPE_MASK_EXTERN_LEVEL, intenable);
67}
68
69static void xtensa_mx_irq_mask(struct irq_data *d)
70{
71 unsigned int mask = 1u << d->hwirq;
72
73 if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
74 XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
75 set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
76 HW_IRQ_MX_BASE), MIENG);
77 } else {
78 mask = __this_cpu_read(cached_irq_mask) & ~mask;
79 __this_cpu_write(cached_irq_mask, mask);
80 set_sr(mask, intenable);
81 }
82}
83
84static void xtensa_mx_irq_unmask(struct irq_data *d)
85{
86 unsigned int mask = 1u << d->hwirq;
87
88 if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
89 XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
90 set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
91 HW_IRQ_MX_BASE), MIENGSET);
92 } else {
93 mask |= __this_cpu_read(cached_irq_mask);
94 __this_cpu_write(cached_irq_mask, mask);
95 set_sr(mask, intenable);
96 }
97}
98
/*
 * Enable an IRQ: run the board/variant enable hook first, then unmask.
 * Order intentionally mirrors xtensa_mx_irq_disable(), which masks
 * before calling the variant disable hook.
 */
static void xtensa_mx_irq_enable(struct irq_data *d)
{
	variant_irq_enable(d->hwirq);
	xtensa_mx_irq_unmask(d);
}
104
/*
 * Disable an IRQ: mask it first, then run the board/variant disable
 * hook (reverse order of xtensa_mx_irq_enable()).
 */
static void xtensa_mx_irq_disable(struct irq_data *d)
{
	xtensa_mx_irq_mask(d);
	variant_irq_disable(d->hwirq);
}
110
/* Ack an IRQ: clear its pending bit via the INTCLEAR special register. */
static void xtensa_mx_irq_ack(struct irq_data *d)
{
	set_sr(1 << d->hwirq, intclear);
}
115
/*
 * Retrigger an IRQ by setting its pending bit via the INTSET special
 * register; returns 1 to report the retrigger as handled in hardware.
 *
 * NOTE(review): on Xtensa, writing INTSET is presumably only effective
 * for software interrupts; for edge/level lines the write may be
 * ignored by the core — confirm against the ISA before relying on
 * retrigger for non-software hwirqs (mainline later added a guard
 * here for exactly this reason).
 */
static int xtensa_mx_irq_retrigger(struct irq_data *d)
{
	set_sr(1 << d->hwirq, intset);
	return 1;
}
121
122static int xtensa_mx_irq_set_affinity(struct irq_data *d,
123 const struct cpumask *dest, bool force)
124{
125 unsigned mask = 1u << cpumask_any_and(dest, cpu_online_mask);
126
127 set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE));
128 return 0;
129
130}
131
/* irq_chip callbacks for the xtensa MX interrupt distributor. */
static struct irq_chip xtensa_mx_irq_chip = {
	.name = "xtensa-mx",
	.irq_enable = xtensa_mx_irq_enable,
	.irq_disable = xtensa_mx_irq_disable,
	.irq_mask = xtensa_mx_irq_mask,
	.irq_unmask = xtensa_mx_irq_unmask,
	.irq_ack = xtensa_mx_irq_ack,
	.irq_retrigger = xtensa_mx_irq_retrigger,
	.irq_set_affinity = xtensa_mx_irq_set_affinity,
};
142
143int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
144{
145 struct irq_domain *root_domain =
146 irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
147 &xtensa_mx_irq_domain_ops,
148 &xtensa_mx_irq_chip);
149 irq_set_default_host(root_domain);
150 secondary_init_irq();
151 return 0;
152}
153
154static int __init xtensa_mx_init(struct device_node *np,
155 struct device_node *interrupt_parent)
156{
157 struct irq_domain *root_domain =
158 irq_domain_add_linear(np, NR_IRQS, &xtensa_mx_irq_domain_ops,
159 &xtensa_mx_irq_chip);
160 irq_set_default_host(root_domain);
161 secondary_init_irq();
162 return 0;
163}
164IRQCHIP_DECLARE(xtensa_mx_irq_chip, "cdns,xtensa-mx", xtensa_mx_init);
165