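/*
 * GPC (General Power Controller) support for i.MX6: a wakeup-capable
 * irqchip stacked on top of the GIC, ARM core power-gating timing helpers
 * used by the suspend code, and the PU (GPU/VPU) power domain.
 */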
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
#include <linux/irqchip/arm-gic.h>
#include "common.h"
#include "hardware.h"

#define GPC_CNTR 0x000
#define GPC_IMR1 0x008
#define GPC_PGC_GPU_PDN 0x260
#define GPC_PGC_GPU_PUPSCR 0x264
#define GPC_PGC_GPU_PDNSCR 0x268
#define GPC_PGC_CPU_PDN 0x2a0
#define GPC_PGC_CPU_PUPSCR 0x2a4
#define GPC_PGC_CPU_PDNSCR 0x2a8
#define GPC_PGC_SW2ISO_SHIFT 0x8
#define GPC_PGC_SW_SHIFT 0x0

#define IMR_NUM 4
#define GPC_MAX_IRQS (IMR_NUM * 32)

#define GPU_VPU_PUP_REQ BIT(1)
#define GPU_VPU_PDN_REQ BIT(0)

#define GPC_CLK_MAX 6

struct pu_domain {
        struct generic_pm_domain base;
        struct regulator *reg;
        struct clk *clk[GPC_CLK_MAX];
        int num_clks;
};

static void __iomem *gpc_base;
static u32 gpc_wake_irqs[IMR_NUM];
static u32 gpc_saved_imrs[IMR_NUM];

void imx_gpc_set_arm_power_up_timing(u32 sw2iso, u32 sw)
{
        writel_relaxed((sw2iso << GPC_PGC_SW2ISO_SHIFT) |
                       (sw << GPC_PGC_SW_SHIFT), gpc_base + GPC_PGC_CPU_PUPSCR);
}

void imx_gpc_set_arm_power_down_timing(u32 sw2iso, u32 sw)
{
        writel_relaxed((sw2iso << GPC_PGC_SW2ISO_SHIFT) |
                       (sw << GPC_PGC_SW_SHIFT), gpc_base + GPC_PGC_CPU_PDNSCR);
}

void imx_gpc_set_arm_power_in_lpm(bool power_off)
{
        writel_relaxed(power_off, gpc_base + GPC_PGC_CPU_PDN);
}

void imx_gpc_pre_suspend(bool arm_power_off)
{
        void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
        int i;

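        /* Tell GPC to power off the ARM core when entering suspend */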
        if (arm_power_off)
                imx_gpc_set_arm_power_in_lpm(arm_power_off);

        for (i = 0; i < IMR_NUM; i++) {
                gpc_saved_imrs[i] = readl_relaxed(reg_imr1 + i * 4);
                writel_relaxed(~gpc_wake_irqs[i], reg_imr1 + i * 4);
        }
}

void imx_gpc_post_resume(void)
{
        void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
        int i;

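        /* Keep the ARM core powered on for other low-power modes */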
        imx_gpc_set_arm_power_in_lpm(false);

        for (i = 0; i < IMR_NUM; i++)
                writel_relaxed(gpc_saved_imrs[i], reg_imr1 + i * 4);
}

static int imx_gpc_irq_set_wake(struct irq_data *d, unsigned int on)
{
        unsigned int idx = d->hwirq / 32;
        u32 mask;

        mask = 1 << (d->hwirq % 32);
        gpc_wake_irqs[idx] = on ? gpc_wake_irqs[idx] | mask :
                                  gpc_wake_irqs[idx] & ~mask;

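        /*
         * Do *not* call into the parent: the GIC has no wakeup facility
         * of its own, so updating the GPC wakeup mask above is all that
         * is needed here.
         */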
        return 0;
}

void imx_gpc_mask_all(void)
{
        void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
        int i;

        for (i = 0; i < IMR_NUM; i++) {
                gpc_saved_imrs[i] = readl_relaxed(reg_imr1 + i * 4);
                writel_relaxed(~0, reg_imr1 + i * 4);
        }
}

void imx_gpc_restore_all(void)
{
        void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
        int i;

        for (i = 0; i < IMR_NUM; i++)
                writel_relaxed(gpc_saved_imrs[i], reg_imr1 + i * 4);
}

void imx_gpc_hwirq_unmask(unsigned int hwirq)
{
        void __iomem *reg;
        u32 val;

        reg = gpc_base + GPC_IMR1 + hwirq / 32 * 4;
        val = readl_relaxed(reg);
        val &= ~(1 << (hwirq % 32));
        writel_relaxed(val, reg);
}

void imx_gpc_hwirq_mask(unsigned int hwirq)
{
        void __iomem *reg;
        u32 val;

        reg = gpc_base + GPC_IMR1 + hwirq / 32 * 4;
        val = readl_relaxed(reg);
        val |= 1 << (hwirq % 32);
        writel_relaxed(val, reg);
}

static void imx_gpc_irq_unmask(struct irq_data *d)
{
        imx_gpc_hwirq_unmask(d->hwirq);
        irq_chip_unmask_parent(d);
}

static void imx_gpc_irq_mask(struct irq_data *d)
{
        imx_gpc_hwirq_mask(d->hwirq);
        irq_chip_mask_parent(d);
}

static struct irq_chip imx_gpc_chip = {
        .name = "GPC",
        .irq_eoi = irq_chip_eoi_parent,
        .irq_mask = imx_gpc_irq_mask,
        .irq_unmask = imx_gpc_irq_unmask,
        .irq_retrigger = irq_chip_retrigger_hierarchy,
        .irq_set_wake = imx_gpc_irq_set_wake,
        .irq_set_type = irq_chip_set_type_parent,
#ifdef CONFIG_SMP
        .irq_set_affinity = irq_chip_set_affinity_parent,
#endif
};

static int imx_gpc_domain_translate(struct irq_domain *d,
                                    struct irq_fwspec *fwspec,
                                    unsigned long *hwirq,
                                    unsigned int *type)
{
        if (is_of_node(fwspec->fwnode)) {
                if (fwspec->param_count != 3)
                        return -EINVAL;

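                /* No PPI should point to this domain */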
                if (fwspec->param[0] != 0)
                        return -EINVAL;

                *hwirq = fwspec->param[1];
                *type = fwspec->param[2];
                return 0;
        }

        return -EINVAL;
}

static int imx_gpc_domain_alloc(struct irq_domain *domain,
                                unsigned int irq,
                                unsigned int nr_irqs, void *data)
{
        struct irq_fwspec *fwspec = data;
        struct irq_fwspec parent_fwspec;
        irq_hw_number_t hwirq;
        int i;

        if (fwspec->param_count != 3)
                return -EINVAL;
        if (fwspec->param[0] != 0)
                return -EINVAL;

        hwirq = fwspec->param[1];
        if (hwirq >= GPC_MAX_IRQS)
                return -EINVAL;

        for (i = 0; i < nr_irqs; i++)
                irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i,
                                              &imx_gpc_chip, NULL);

        parent_fwspec = *fwspec;
        parent_fwspec.fwnode = domain->parent->fwnode;
        return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs,
                                            &parent_fwspec);
}

static const struct irq_domain_ops imx_gpc_domain_ops = {
        .translate = imx_gpc_domain_translate,
        .alloc = imx_gpc_domain_alloc,
        .free = irq_domain_free_irqs_common,
};

static int __init imx_gpc_init(struct device_node *node,
                               struct device_node *parent)
{
        struct irq_domain *parent_domain, *domain;
        int i;

        if (!parent) {
                pr_err("%s: no parent, giving up\n", node->full_name);
                return -ENODEV;
        }

        parent_domain = irq_find_host(parent);
        if (!parent_domain) {
                pr_err("%s: unable to obtain parent domain\n", node->full_name);
                return -ENXIO;
        }

        gpc_base = of_iomap(node, 0);
        if (WARN_ON(!gpc_base))
                return -ENOMEM;

        domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS,
                                          node, &imx_gpc_domain_ops,
                                          NULL);
        if (!domain) {
                iounmap(gpc_base);
                return -ENOMEM;
        }

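        /* Initially mask all interrupts */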
        for (i = 0; i < IMR_NUM; i++)
                writel_relaxed(~0, gpc_base + GPC_IMR1 + i * 4);

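        /*
         * Clear the OF_POPULATED flag set in of_irq_init so that the
         * GPC power domain platform driver is not skipped later on.
         */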
        of_node_clear_flag(node, OF_POPULATED);

        return 0;
}
IRQCHIP_DECLARE(imx_gpc, "fsl,imx6q-gpc", imx_gpc_init);

void __init imx_gpc_check_dt(void)
{
        struct device_node *np;

        np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpc");
        if (WARN_ON(!np))
                return;

        if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) {
                pr_warn("Outdated DT detected, suspend/resume will NOT work\n");

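                /* Map the GPC anyway so that the CPU power helpers keep working */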
                gpc_base = of_iomap(np, 0);
        }
}

static void _imx6q_pm_pu_power_off(struct generic_pm_domain *genpd)
{
        int iso, iso2sw;
        u32 val;

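        /* Read ISO and ISO2SW power down delays */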
        val = readl_relaxed(gpc_base + GPC_PGC_GPU_PDNSCR);
        iso = val & 0x3f;
        iso2sw = (val >> 8) & 0x3f;

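        /* Gate off the PU domain when GPU/VPU is powered down */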
        writel_relaxed(0x1, gpc_base + GPC_PGC_GPU_PDN);

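        /* Request GPC to power down GPU/VPU */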
        val = readl_relaxed(gpc_base + GPC_CNTR);
        val |= GPU_VPU_PDN_REQ;
        writel_relaxed(val, gpc_base + GPC_CNTR);

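        /* Wait ISO + ISO2SW IPG clock cycles */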
        ndelay((iso + iso2sw) * 1000 / 66);
}

static int imx6q_pm_pu_power_off(struct generic_pm_domain *genpd)
{
        struct pu_domain *pu = container_of(genpd, struct pu_domain, base);

        _imx6q_pm_pu_power_off(genpd);

        if (pu->reg)
                regulator_disable(pu->reg);

        return 0;
}

static int imx6q_pm_pu_power_on(struct generic_pm_domain *genpd)
{
        struct pu_domain *pu = container_of(genpd, struct pu_domain, base);
        int i, ret, sw, sw2iso;
        u32 val;

        if (pu->reg) {
                ret = regulator_enable(pu->reg);
                if (ret) {
                        pr_err("%s: failed to enable regulator: %d\n",
                               __func__, ret);
                        return ret;
                }
        }

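        /* Enable reset clocks for all devices in the PU domain */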
        for (i = 0; i < pu->num_clks; i++)
                clk_prepare_enable(pu->clk[i]);

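        /* Gate off the PU domain when GPU/VPU is powered down */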
        writel_relaxed(0x1, gpc_base + GPC_PGC_GPU_PDN);

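        /* Read SW and SW2ISO power up delays */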
        val = readl_relaxed(gpc_base + GPC_PGC_GPU_PUPSCR);
        sw = val & 0x3f;
        sw2iso = (val >> 8) & 0x3f;

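        /* Request GPC to power up GPU/VPU */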
        val = readl_relaxed(gpc_base + GPC_CNTR);
        val |= GPU_VPU_PUP_REQ;
        writel_relaxed(val, gpc_base + GPC_CNTR);

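        /* Wait SW + SW2ISO IPG clock cycles */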
        ndelay((sw + sw2iso) * 1000 / 66);

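        /* Disable reset clocks for all devices in the PU domain */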
        for (i = 0; i < pu->num_clks; i++)
                clk_disable_unprepare(pu->clk[i]);

        return 0;
}

static struct generic_pm_domain imx6q_arm_domain = {
        .name = "ARM",
};

static struct pu_domain imx6q_pu_domain = {
        .base = {
                .name = "PU",
                .power_off = imx6q_pm_pu_power_off,
                .power_on = imx6q_pm_pu_power_on,
                .states = {
                        [0] = {
                                .power_off_latency_ns = 25000,
                                .power_on_latency_ns = 2000000,
                        },
                },
                .state_count = 1,
        },
};

static struct generic_pm_domain imx6sl_display_domain = {
        .name = "DISPLAY",
};

static struct generic_pm_domain *imx_gpc_domains[] = {
        &imx6q_arm_domain,
        &imx6q_pu_domain.base,
        &imx6sl_display_domain,
};

static struct genpd_onecell_data imx_gpc_onecell_data = {
        .domains = imx_gpc_domains,
        .num_domains = ARRAY_SIZE(imx_gpc_domains),
};

static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
{
        struct clk *clk;
        int i;

        imx6q_pu_domain.reg = pu_reg;

        for (i = 0; ; i++) {
                clk = of_clk_get(dev->of_node, i);
                if (IS_ERR(clk))
                        break;
                if (i >= GPC_CLK_MAX) {
                        dev_err(dev, "more than %d clocks\n", GPC_CLK_MAX);
                        goto clk_err;
                }
                imx6q_pu_domain.clk[i] = clk;
        }
        imx6q_pu_domain.num_clks = i;

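        /* Enable power always in case the bootloader disabled it */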
        imx6q_pm_pu_power_on(&imx6q_pu_domain.base);

        if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
                return 0;

        for (i = 0; i < ARRAY_SIZE(imx_gpc_domains); i++)
                pm_genpd_init(imx_gpc_domains[i], NULL, false);

        return of_genpd_add_provider_onecell(dev->of_node,
                                             &imx_gpc_onecell_data);

clk_err:
        while (i--)
                clk_put(imx6q_pu_domain.clk[i]);
        return -EINVAL;
}

static int imx_gpc_probe(struct platform_device *pdev)
{
        struct regulator *pu_reg;
        int ret;

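        /* Bail out if the DT is too old and lacks the power-domain data */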
        if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells"))
                return 0;

        pu_reg = devm_regulator_get_optional(&pdev->dev, "pu");
        if (PTR_ERR(pu_reg) == -ENODEV)
                pu_reg = NULL;
        if (IS_ERR(pu_reg)) {
                ret = PTR_ERR(pu_reg);
                dev_err(&pdev->dev, "failed to get pu regulator: %d\n", ret);
                return ret;
        }

        return imx_gpc_genpd_init(&pdev->dev, pu_reg);
}

static const struct of_device_id imx_gpc_dt_ids[] = {
        { .compatible = "fsl,imx6q-gpc" },
        { .compatible = "fsl,imx6sl-gpc" },
        { }
};

static struct platform_driver imx_gpc_driver = {
        .driver = {
                .name = "imx-gpc",
                .of_match_table = imx_gpc_dt_ids,
        },
        .probe = imx_gpc_probe,
};

static int __init imx_pgc_init(void)
{
        return platform_driver_register(&imx_gpc_driver);
}
subsys_initcall(imx_pgc_init);