/*
 * Copyright (C) 2015 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/irqchip.h>
#include <linux/syscore_ops.h>

#define IMR_NUM			4
#define GPC_MAX_IRQS		(IMR_NUM * 32)

#define GPC_IMR1_CORE0		0x30
#define GPC_IMR1_CORE1		0x40

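/*
 * Per-instance state: the mapped GPC register base, the wakeup mask to
 * program into the hardware on suspend, the IMR contents saved across
 * suspend, and the offset of the IMR bank of the CPU that handles wakeup.
 */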
struct gpcv2_irqchip_data {
	struct raw_spinlock	rlock;
	void __iomem		*gpc_base;
	u32			wakeup_sources[IMR_NUM];
	u32			saved_irq_mask[IMR_NUM];
	u32			cpu2wakeup;
};

static struct gpcv2_irqchip_data *imx_gpcv2_instance;

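/*
 * syscore suspend hook: save the live interrupt masks and replace them
 * with the wakeup mask, so that only interrupts marked as wakeup sources
 * can bring the system out of the low-power state.
 */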
static int gpcv2_wakeup_source_save(void)
{
	struct gpcv2_irqchip_data *cd;
	void __iomem *reg;
	int i;

	cd = imx_gpcv2_instance;
	if (!cd)
		return 0;

	for (i = 0; i < IMR_NUM; i++) {
		reg = cd->gpc_base + cd->cpu2wakeup + i * 4;
		cd->saved_irq_mask[i] = readl_relaxed(reg);
		writel_relaxed(cd->wakeup_sources[i], reg);
	}

	return 0;
}

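/* syscore resume hook: restore the interrupt masks saved at suspend. */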
static void gpcv2_wakeup_source_restore(void)
{
	struct gpcv2_irqchip_data *cd;
	void __iomem *reg;
	int i;

	cd = imx_gpcv2_instance;
	if (!cd)
		return;

	for (i = 0; i < IMR_NUM; i++) {
		reg = cd->gpc_base + cd->cpu2wakeup + i * 4;
		writel_relaxed(cd->saved_irq_mask[i], reg);
	}
}

static struct syscore_ops imx_gpcv2_syscore_ops = {
	.suspend	= gpcv2_wakeup_source_save,
	.resume		= gpcv2_wakeup_source_restore,
};

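/*
 * IMR bits are active-high masks, so marking an interrupt as a wakeup
 * source means clearing its bit in the mask that will be programmed into
 * the hardware at suspend time.
 */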
static int imx_gpcv2_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct gpcv2_irqchip_data *cd = d->chip_data;
	unsigned int idx = d->hwirq / 32;
	unsigned long flags;
	u32 mask, val;

	raw_spin_lock_irqsave(&cd->rlock, flags);
	mask = 1 << (d->hwirq % 32);
	val = cd->wakeup_sources[idx];

	cd->wakeup_sources[idx] = on ? (val & ~mask) : (val | mask);
	raw_spin_unlock_irqrestore(&cd->rlock, flags);

	/*
	 * Do *not* call into the parent, as the GIC doesn't have any
	 * wake-up facility...
	 */
	return 0;
}

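/*
 * Unmask at the GPC level by clearing the interrupt's IMR bit, then let
 * the parent chip (the GIC) unmask it too.
 */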
static void imx_gpcv2_irq_unmask(struct irq_data *d)
{
	struct gpcv2_irqchip_data *cd = d->chip_data;
	void __iomem *reg;
	u32 val;

	raw_spin_lock(&cd->rlock);
	reg = cd->gpc_base + cd->cpu2wakeup + d->hwirq / 32 * 4;
	val = readl_relaxed(reg);
	val &= ~(1 << (d->hwirq % 32));
	writel_relaxed(val, reg);
	raw_spin_unlock(&cd->rlock);

	irq_chip_unmask_parent(d);
}

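/*
 * Mask at the GPC level by setting the interrupt's IMR bit, then
 * propagate the mask to the parent chip.
 */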
static void imx_gpcv2_irq_mask(struct irq_data *d)
{
	struct gpcv2_irqchip_data *cd = d->chip_data;
	void __iomem *reg;
	u32 val;

	raw_spin_lock(&cd->rlock);
	reg = cd->gpc_base + cd->cpu2wakeup + d->hwirq / 32 * 4;
	val = readl_relaxed(reg);
	val |= 1 << (d->hwirq % 32);
	writel_relaxed(val, reg);
	raw_spin_unlock(&cd->rlock);

	irq_chip_mask_parent(d);
}

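/*
 * Everything except masking, unmasking and wakeup handling is delegated
 * to the parent (GIC) chip in the hierarchy.
 */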
static struct irq_chip gpcv2_irqchip_data_chip = {
	.name			= "GPCv2",
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_mask		= imx_gpcv2_irq_mask,
	.irq_unmask		= imx_gpcv2_irq_unmask,
	.irq_set_wake		= imx_gpcv2_irq_set_wake,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
#ifdef CONFIG_SMP
	.irq_set_affinity	= irq_chip_set_affinity_parent,
#endif
};

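/*
 * Decode a three-cell, GIC-style devicetree specifier: cell 0 must be 0
 * (SPI), cell 1 is the hardware IRQ number and cell 2 holds the trigger
 * type flags.
 */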
static int imx_gpcv2_domain_translate(struct irq_domain *d,
				      struct irq_fwspec *fwspec,
				      unsigned long *hwirq,
				      unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count != 3)
			return -EINVAL;

		/* No PPI should point to this domain */
		if (fwspec->param[0] != 0)
			return -EINVAL;

		*hwirq = fwspec->param[1];
		*type = fwspec->param[2];
		return 0;
	}

	return -EINVAL;
}

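/*
 * Install the GPCv2 chip for each interrupt at this level of the
 * hierarchy, then allocate the same range from the parent (GIC) domain
 * using an equivalent firmware specifier.
 */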
static int imx_gpcv2_domain_alloc(struct irq_domain *domain,
				  unsigned int irq, unsigned int nr_irqs,
				  void *data)
{
	struct irq_fwspec *fwspec = data;
	struct irq_fwspec parent_fwspec;
	irq_hw_number_t hwirq;
	unsigned int type;
	int err;
	int i;

	err = imx_gpcv2_domain_translate(domain, fwspec, &hwirq, &type);
	if (err)
		return err;

	if (hwirq >= GPC_MAX_IRQS)
		return -EINVAL;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i,
				&gpcv2_irqchip_data_chip, domain->host_data);

	parent_fwspec = *fwspec;
	parent_fwspec.fwnode = domain->parent->fwnode;
	return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs,
					    &parent_fwspec);
}

static const struct irq_domain_ops gpcv2_irqchip_data_domain_ops = {
	.translate	= imx_gpcv2_domain_translate,
	.alloc		= imx_gpcv2_domain_alloc,
	.free		= irq_domain_free_irqs_common,
};

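/*
 * Init path: map the GPC registers, stack an irq domain on top of the
 * parent (GIC) domain, mask all interrupts, and register syscore ops so
 * the wakeup mask is programmed across suspend/resume.
 */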
static int __init imx_gpcv2_irqchip_init(struct device_node *node,
					 struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	struct gpcv2_irqchip_data *cd;
	int i;

	if (!parent) {
		pr_err("%pOF: no parent, giving up\n", node);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%pOF: unable to get parent domain\n", node);
		return -ENXIO;
	}

	cd = kzalloc(sizeof(struct gpcv2_irqchip_data), GFP_KERNEL);
	if (!cd)
		return -ENOMEM;

	raw_spin_lock_init(&cd->rlock);

	cd->gpc_base = of_iomap(node, 0);
	if (!cd->gpc_base) {
		pr_err("%pOF: unable to map gpc registers\n", node);
		kfree(cd);
		return -ENOMEM;
	}

	domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS,
				node, &gpcv2_irqchip_data_domain_ops, cd);
	if (!domain) {
		iounmap(cd->gpc_base);
		kfree(cd);
		return -ENOMEM;
	}
	irq_set_default_host(domain);

	/* Initially mask all interrupts */
	for (i = 0; i < IMR_NUM; i++) {
		writel_relaxed(~0, cd->gpc_base + GPC_IMR1_CORE0 + i * 4);
		writel_relaxed(~0, cd->gpc_base + GPC_IMR1_CORE1 + i * 4);
		cd->wakeup_sources[i] = ~0;
	}

	/* Let CORE0 be the default CPU woken up by the GPC */
	cd->cpu2wakeup = GPC_IMR1_CORE0;

	/*
	 * Due to a hardware design requirement, the GPR interrupt (#32)
	 * must stay unmasked during RUN mode to avoid entering DSM by
	 * mistake.
	 */
	writel_relaxed(~0x1, cd->gpc_base + cd->cpu2wakeup);

	imx_gpcv2_instance = cd;
	register_syscore_ops(&imx_gpcv2_syscore_ops);

	/*
	 * Clear the OF_POPULATED flag set in of_irq_init so that
	 * the GPC power domain driver will not be skipped later.
	 */
	of_node_clear_flag(node, OF_POPULATED);

	return 0;
}

IRQCHIP_DECLARE(imx_gpcv2, "fsl,imx7d-gpc", imx_gpcv2_irqchip_init);