// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas Clock Pulse Generator / Module Standby and Software Reset driver
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/psci.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>

#include <dt-bindings/clock/renesas-cpg-mssr.h>

#include "renesas-cpg-mssr.h"
#include "clk-div6.h"

#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif

/*
 * Module Standby and Software Reset register offsets, indexed by register
 * group.
 */

/* Module Stop Status Register offsets */
static const u16 mstpsr[] = {
	0x030, 0x038, 0x040, 0x048, 0x04C, 0x03C, 0x1C0, 0x1C4,
	0x9A0, 0x9A4, 0x9A8, 0x9AC,
};

#define MSTPSR(i)	mstpsr[i]

/* System Module Stop Control Register offsets */
static const u16 smstpcr[] = {
	0x130, 0x134, 0x138, 0x13C, 0x140, 0x144, 0x148, 0x14C,
	0x990, 0x994, 0x998, 0x99C,
};

#define SMSTPCR(i)	smstpcr[i]

/*
 * Standby Control Register offsets (RZ/A).
 * 0xFFFF marks an index for which no register exists.
 */
static const u16 stbcr[] = {
	0xFFFF, 0x010, 0x014, 0x410, 0x414, 0x418, 0x41C, 0x420,
	0x424, 0x428, 0x42C,
};

#define STBCR(i)	stbcr[i]

/* Software Reset Register offsets */
static const u16 srcr[] = {
	0x0A0, 0x0A8, 0x0B0, 0x0B8, 0x0BC, 0x0C4, 0x1C8, 0x1CC,
	0x920, 0x924, 0x928, 0x92C,
};

#define SRCR(i)		srcr[i]

/* Realtime Module Stop Control Register offsets */
#define RMSTPCR(i)	(smstpcr[i] - 0x20)

/* Modem Module Stop Control Register offsets */
#define MMSTPCR(i)	(smstpcr[i] + 0x20)

/* Software Reset Clearing Register offsets */
#define SRSTCLR(i)	(0x940 + (i) * 4)
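/*
 * Worked example (derived from the tables above): for register index i = 2,
 * SMSTPCR(2) = 0x138 controls the module-stop bits, MSTPSR(2) = 0x040
 * reports their status, SRCR(2) = 0x0B0 asserts a software reset, and
 * SRSTCLR(2) = 0x948 clears it. A module clock/reset written as "204" in the
 * Renesas documentation therefore refers to bit 4 of these index-2 registers.
 */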
/**
 * struct cpg_mssr_priv - Clock Pulse Generator / Module Standby
 *                        and Software Reset private data
 *
 * @rcdev: Optional reset controller entity
 * @dev: CPG/MSSR device
 * @base: CPG/MSSR register block base address
 * @rmw_lock: Protects read-modify-write register accesses
 * @np: Device node in DT for this CPG/MSSR module
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @stbyctrl: This device uses Standby Control Registers (RZ/A)
 * @notifiers: Notifier chain to save/restore clock state across suspend
 * @smstpcr_saved: Per-register mask of module clock bits under our control
 *                 and their values saved at suspend time
 * @clks: Array containing all Core and Module Clocks
 */
struct cpg_mssr_priv {
#ifdef CONFIG_RESET_CONTROLLER
	struct reset_controller_dev rcdev;
#endif
	struct device *dev;
	void __iomem *base;
	spinlock_t rmw_lock;
	struct device_node *np;

	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	unsigned int last_dt_core_clk;
	bool stbyctrl;

	struct raw_notifier_head notifiers;
	struct {
		u32 mask;
		u32 val;
	} smstpcr_saved[ARRAY_SIZE(smstpcr)];

	struct clk *clks[];
};

static struct cpg_mssr_priv *cpg_mssr_priv;

/**
 * struct mstp_clock - MSTP gating clock
 * @hw: handle between common and hardware-specific interfaces
 * @index: MSTP clock number (register index * 32 + bit position)
 * @priv: CPG/MSSR private data
 */
struct mstp_clock {
	struct clk_hw hw;
	u32 index;
	struct cpg_mssr_priv *priv;
};

#define to_mstp_clock(_hw) container_of(_hw, struct mstp_clock, hw)

static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mstp_clock *clock = to_mstp_clock(hw);
	struct cpg_mssr_priv *priv = clock->priv;
	unsigned int reg = clock->index / 32;
	unsigned int bit = clock->index % 32;
	struct device *dev = priv->dev;
	u32 bitmask = BIT(bit);
	unsigned long flags;
	unsigned int i;
	u32 value;

	dev_dbg(dev, "MSTP %u%02u/%pC %s\n", reg, bit, hw->clk,
		enable ? "ON" : "OFF");
	spin_lock_irqsave(&priv->rmw_lock, flags);

	if (priv->stbyctrl) {
		value = readb(priv->base + STBCR(reg));
		if (enable)
			value &= ~bitmask;
		else
			value |= bitmask;
		writeb(value, priv->base + STBCR(reg));

		/* dummy read to ensure write has completed */
		readb(priv->base + STBCR(reg));
		barrier_data(priv->base + STBCR(reg));
	} else {
		value = readl(priv->base + SMSTPCR(reg));
		if (enable)
			value &= ~bitmask;
		else
			value |= bitmask;
		writel(value, priv->base + SMSTPCR(reg));
	}

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (!enable || priv->stbyctrl)
		return 0;

	for (i = 1000; i > 0; --i) {
		if (!(readl(priv->base + MSTPSR(reg)) & bitmask))
			break;
		cpu_relax();
	}

	if (!i) {
		dev_err(dev, "Failed to enable SMSTP %p[%d]\n",
			priv->base + SMSTPCR(reg), bit);
		return -ETIMEDOUT;
	}

	return 0;
}

static int cpg_mstp_clock_enable(struct clk_hw *hw)
{
	return cpg_mstp_clock_endisable(hw, true);
}

static void cpg_mstp_clock_disable(struct clk_hw *hw)
{
	cpg_mstp_clock_endisable(hw, false);
}

static int cpg_mstp_clock_is_enabled(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mstp_clock(hw);
	struct cpg_mssr_priv *priv = clock->priv;
	u32 value;

	if (priv->stbyctrl)
		value = readb(priv->base + STBCR(clock->index / 32));
	else
		value = readl(priv->base + MSTPSR(clock->index / 32));

	return !(value & BIT(clock->index % 32));
}

static const struct clk_ops cpg_mstp_clock_ops = {
	.enable = cpg_mstp_clock_enable,
	.disable = cpg_mstp_clock_disable,
	.is_enabled = cpg_mstp_clock_is_enabled,
};
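/*
 * Illustrative consumer-side usage (not part of this driver; the device and
 * error handling are only sketched): a peripheral driver that was handed one
 * of the module clocks registered below drives the gate above through the
 * common clock API:
 *
 *	struct clk *clk = devm_clk_get(&pdev->dev, NULL);
 *	int ret;
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *	ret = clk_prepare_enable(clk);	// clears the module-stop bit via .enable
 *	if (ret)
 *		return ret;
 *	...
 *	clk_disable_unprepare(clk);	// sets the module-stop bit via .disable
 */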
static
struct clk *cpg_mssr_clk_src_twocell_get(struct of_phandle_args *clkspec,
					 void *data)
{
	unsigned int clkidx = clkspec->args[1];
	struct cpg_mssr_priv *priv = data;
	struct device *dev = priv->dev;
	unsigned int idx;
	const char *type;
	struct clk *clk;
	int range_check;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		type = "core";
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
				clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[clkidx];
		break;

	case CPG_MOD:
		type = "module";
		if (priv->stbyctrl) {
			idx = MOD_CLK_PACK_10(clkidx);
			range_check = 7 - (clkidx % 10);
		} else {
			idx = MOD_CLK_PACK(clkidx);
			range_check = 31 - (clkidx % 100);
		}
		if (range_check < 0 || idx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
				clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[priv->num_core_clks + idx];
		break;

	default:
		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(clk))
		dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
			PTR_ERR(clk));
	else
		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
			clkspec->args[0], clkspec->args[1], clk,
			clk_get_rate(clk));
	return clk;
}
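/*
 * Example device tree usage of the two-cell specifier handled above
 * (illustrative only; node names and indices are placeholders, not taken
 * from this file):
 *
 *	clocks = <&cpg CPG_MOD 204>, <&cpg CPG_CORE 1>;
 *
 * The first cell selects the clock type (CPG_CORE or CPG_MOD), the second
 * the index. Module clock indices use the Renesas decimal packing, e.g.
 * index 204 selects register group 2, bit 4; SoCs using Standby Control
 * Registers pack per ten instead, with the bit limited to 0..7.
 */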
static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
					      const struct cpg_mssr_info *info,
					      struct cpg_mssr_priv *priv)
{
	struct clk *clk = ERR_PTR(-ENOTSUPP), *parent;
	struct device *dev = priv->dev;
	unsigned int id = core->id, div = core->div;
	const char *parent_name;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!core->name) {
		/* Skip NULLified clock */
		return;
	}

	switch (core->type) {
	case CLK_TYPE_IN:
		clk = of_clk_get_by_name(priv->np, core->name);
		break;

	case CLK_TYPE_FF:
	case CLK_TYPE_DIV6P1:
	case CLK_TYPE_DIV6_RO:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);

		if (core->type == CLK_TYPE_DIV6_RO)
			/* Multiply with the DIV6 register value */
			div *= (readl(priv->base + core->offset) & 0x3f) + 1;

		if (core->type == CLK_TYPE_DIV6P1) {
			clk = cpg_div6_register(core->name, 1, &parent_name,
						priv->base + core->offset,
						&priv->notifiers);
		} else {
			clk = clk_register_fixed_factor(NULL, core->name,
							parent_name, 0,
							core->mult, div);
		}
		break;

	case CLK_TYPE_FR:
		clk = clk_register_fixed_rate(NULL, core->name, NULL, 0,
					      core->mult);
		break;

	default:
		if (info->cpg_clk_register)
			clk = info->cpg_clk_register(dev, core, info,
						     priv->clks, priv->base,
						     &priv->notifiers);
		else
			dev_err(dev, "%s has unsupported core clock type %u\n",
				core->name, core->type);
		break;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
		core->name, PTR_ERR(clk));
}

static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod,
					     const struct cpg_mssr_info *info,
					     struct cpg_mssr_priv *priv)
{
	struct mstp_clock *clock = NULL;
	struct device *dev = priv->dev;
	unsigned int id = mod->id;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int i;

	WARN_DEBUG(id < priv->num_core_clks);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!mod->name) {
		/* Skip NULLified clock */
		return;
	}

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = kzalloc(sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &cpg_mstp_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	for (i = 0; i < info->num_crit_mod_clks; i++)
		if (id == info->crit_mod_clks[i]) {
			dev_dbg(dev, "MSTP %s setting CLK_IS_CRITICAL\n",
				mod->name);
			init.flags |= CLK_IS_CRITICAL;
			break;
		}

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->index = id - priv->num_core_clks;
	clock->priv = priv;
	clock->hw.init = &init;

	clk = clk_register(NULL, &clock->hw);
	if (IS_ERR(clk))
		goto fail;

	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	priv->smstpcr_saved[clock->index / 32].mask |= BIT(clock->index % 32);
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
		mod->name, PTR_ERR(clk));
	kfree(clock);
}
struct cpg_mssr_clk_domain {
	struct generic_pm_domain genpd;
	unsigned int num_core_pm_clks;
	unsigned int core_pm_clks[];
};

static struct cpg_mssr_clk_domain *cpg_mssr_clk_domain;

static bool cpg_mssr_is_pm_clk(const struct of_phandle_args *clkspec,
			       struct cpg_mssr_clk_domain *pd)
{
	unsigned int i;

	if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2)
		return false;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		for (i = 0; i < pd->num_core_pm_clks; i++)
			if (clkspec->args[1] == pd->core_pm_clks[i])
				return true;
		return false;

	case CPG_MOD:
		return true;

	default:
		return false;
	}
}

int cpg_mssr_attach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	struct cpg_mssr_clk_domain *pd = cpg_mssr_clk_domain;
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	struct clk *clk;
	int i = 0;
	int error;

	if (!pd) {
		dev_dbg(dev, "CPG/MSSR clock domain not yet available\n");
		return -EPROBE_DEFER;
	}

	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
					   &clkspec)) {
		if (cpg_mssr_is_pm_clk(&clkspec, pd))
			goto found;

		of_node_put(clkspec.np);
		i++;
	}

	return 0;

found:
	clk = of_clk_get_from_provider(&clkspec);
	of_node_put(clkspec.np);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	error = pm_clk_create(dev);
	if (error)
		goto fail_put;

	error = pm_clk_add_clk(dev, clk);
	if (error)
		goto fail_destroy;

	return 0;

fail_destroy:
	pm_clk_destroy(dev);
fail_put:
	clk_put(clk);
	return error;
}

void cpg_mssr_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (!pm_clk_no_clocks(dev))
		pm_clk_destroy(dev);
}

static int __init cpg_mssr_add_clk_domain(struct device *dev,
					  const unsigned int *core_pm_clks,
					  unsigned int num_core_pm_clks)
{
	struct device_node *np = dev->of_node;
	struct generic_pm_domain *genpd;
	struct cpg_mssr_clk_domain *pd;
	size_t pm_size = num_core_pm_clks * sizeof(core_pm_clks[0]);

	pd = devm_kzalloc(dev, sizeof(*pd) + pm_size, GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->num_core_pm_clks = num_core_pm_clks;
	memcpy(pd->core_pm_clks, core_pm_clks, pm_size);

	genpd = &pd->genpd;
	genpd->name = np->name;
	genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
	genpd->attach_dev = cpg_mssr_attach_dev;
	genpd->detach_dev = cpg_mssr_detach_dev;
	pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
	cpg_mssr_clk_domain = pd;

	of_genpd_add_provider_simple(np, genpd);
	return 0;
}
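/*
 * Illustrative consumer binding for the clock domain registered above (node
 * names are placeholders): since of_genpd_add_provider_simple() registers a
 * zero-cell power-domain provider, peripherals reference it as
 *
 *	power-domains = <&cpg>;
 *
 * cpg_mssr_attach_dev() then puts the device's first suitable CPG clock
 * under Runtime PM control via the pm_clk framework.
 */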
#ifdef CONFIG_RESET_CONTROLLER

#define rcdev_to_priv(x)	container_of(x, struct cpg_mssr_priv, rcdev)

static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
			  unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);

	/* Reset module */
	writel(bitmask, priv->base + SRCR(reg));

	/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
	udelay(35);

	/* Release module from reset state */
	writel(bitmask, priv->base + SRSTCLR(reg));

	return 0;
}

static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);

	writel(bitmask, priv->base + SRCR(reg));
	return 0;
}

static int cpg_mssr_deassert(struct reset_controller_dev *rcdev,
			     unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	dev_dbg(priv->dev, "deassert %u%02u\n", reg, bit);

	writel(bitmask, priv->base + SRSTCLR(reg));
	return 0;
}

static int cpg_mssr_status(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	return !!(readl(priv->base + SRCR(reg)) & bitmask);
}

static const struct reset_control_ops cpg_mssr_reset_ops = {
	.reset = cpg_mssr_reset,
	.assert = cpg_mssr_assert,
	.deassert = cpg_mssr_deassert,
	.status = cpg_mssr_status,
};
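/*
 * Illustrative reset consumer (names are placeholders): a device node uses
 * the same packed module numbering as CPG_MOD clocks, e.g.
 *
 *	resets = <&cpg 204>;
 *
 * cpg_mssr_reset_xlate() below converts the packed index to the SRCR/SRSTCLR
 * bit position (register group 2, bit 4 in this example).
 */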
static int cpg_mssr_reset_xlate(struct reset_controller_dev *rcdev,
				const struct of_phandle_args *reset_spec)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int unpacked = reset_spec->args[0];
	unsigned int idx = MOD_CLK_PACK(unpacked);

	if (unpacked % 100 > 31 || idx >= rcdev->nr_resets) {
		dev_err(priv->dev, "Invalid reset index %u\n", unpacked);
		return -EINVAL;
	}

	return idx;
}

static int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv)
{
	priv->rcdev.ops = &cpg_mssr_reset_ops;
	priv->rcdev.of_node = priv->dev->of_node;
	priv->rcdev.of_reset_n_cells = 1;
	priv->rcdev.of_xlate = cpg_mssr_reset_xlate;
	priv->rcdev.nr_resets = priv->num_mod_clks;
	return devm_reset_controller_register(priv->dev, &priv->rcdev);
}

#else
static inline int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv)
{
	return 0;
}
#endif
static const struct of_device_id cpg_mssr_match[] = {
#ifdef CONFIG_CLK_R7S9210
	{
		.compatible = "renesas,r7s9210-cpg-mssr",
		.data = &r7s9210_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7743
	{
		.compatible = "renesas,r8a7743-cpg-mssr",
		.data = &r8a7743_cpg_mssr_info,
	},
	/* RZ/G1N is (almost) identical to RZ/G1M w.r.t. clocks. */
	{
		.compatible = "renesas,r8a7744-cpg-mssr",
		.data = &r8a7743_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7745
	{
		.compatible = "renesas,r8a7745-cpg-mssr",
		.data = &r8a7745_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77470
	{
		.compatible = "renesas,r8a77470-cpg-mssr",
		.data = &r8a77470_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A774A1
	{
		.compatible = "renesas,r8a774a1-cpg-mssr",
		.data = &r8a774a1_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A774C0
	{
		.compatible = "renesas,r8a774c0-cpg-mssr",
		.data = &r8a774c0_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7790
	{
		.compatible = "renesas,r8a7790-cpg-mssr",
		.data = &r8a7790_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7791
	{
		.compatible = "renesas,r8a7791-cpg-mssr",
		.data = &r8a7791_cpg_mssr_info,
	},
	/* R-Car M2-N is (almost) identical to R-Car M2-W w.r.t. clocks. */
	{
		.compatible = "renesas,r8a7793-cpg-mssr",
		.data = &r8a7791_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7792
	{
		.compatible = "renesas,r8a7792-cpg-mssr",
		.data = &r8a7792_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7794
	{
		.compatible = "renesas,r8a7794-cpg-mssr",
		.data = &r8a7794_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7795
	{
		.compatible = "renesas,r8a7795-cpg-mssr",
		.data = &r8a7795_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7796
	{
		.compatible = "renesas,r8a7796-cpg-mssr",
		.data = &r8a7796_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77965
	{
		.compatible = "renesas,r8a77965-cpg-mssr",
		.data = &r8a77965_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77970
	{
		.compatible = "renesas,r8a77970-cpg-mssr",
		.data = &r8a77970_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77980
	{
		.compatible = "renesas,r8a77980-cpg-mssr",
		.data = &r8a77980_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77990
	{
		.compatible = "renesas,r8a77990-cpg-mssr",
		.data = &r8a77990_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77995
	{
		.compatible = "renesas,r8a77995-cpg-mssr",
		.data = &r8a77995_cpg_mssr_info,
	},
#endif
	{ /* sentinel */ }
};

static void cpg_mssr_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM_PSCI_FW)
static int cpg_mssr_suspend_noirq(struct device *dev)
{
	struct cpg_mssr_priv *priv = dev_get_drvdata(dev);
	unsigned int reg;

	/* This is the best we can do to check for the presence of PSCI */
	if (!psci_ops.cpu_suspend)
		return 0;

	/* Save module registers with bits under our control */
	for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
		if (priv->smstpcr_saved[reg].mask)
			priv->smstpcr_saved[reg].val =
				readl(priv->base + SMSTPCR(reg));
	}

	/* Save core clocks */
	raw_notifier_call_chain(&priv->notifiers, PM_EVENT_SUSPEND, NULL);

	return 0;
}

static int cpg_mssr_resume_noirq(struct device *dev)
{
	struct cpg_mssr_priv *priv = dev_get_drvdata(dev);
	unsigned int reg, i;
	u32 mask, oldval, newval;

	/* This is the best we can do to check for the presence of PSCI */
	if (!psci_ops.cpu_suspend)
		return 0;

	/* Restore core clocks */
	raw_notifier_call_chain(&priv->notifiers, PM_EVENT_RESUME, NULL);

	/* Restore module clocks */
	for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
		mask = priv->smstpcr_saved[reg].mask;
		if (!mask)
			continue;

		if (priv->stbyctrl)
			oldval = readb(priv->base + STBCR(reg));
		else
			oldval = readl(priv->base + SMSTPCR(reg));
		newval = oldval & ~mask;
		newval |= priv->smstpcr_saved[reg].val & mask;
		if (newval == oldval)
			continue;

		if (priv->stbyctrl) {
			writeb(newval, priv->base + STBCR(reg));
			/* dummy read to ensure write has completed */
			readb(priv->base + STBCR(reg));
			barrier_data(priv->base + STBCR(reg));
			continue;
		} else
			writel(newval, priv->base + SMSTPCR(reg));

		/* Wait until enabled clocks are really enabled */
		mask &= ~priv->smstpcr_saved[reg].val;
		if (!mask)
			continue;

		for (i = 1000; i > 0; --i) {
			oldval = readl(priv->base + MSTPSR(reg));
			if (!(oldval & mask))
				break;
			cpu_relax();
		}

		if (!i)
			dev_warn(dev, "Failed to enable SMSTP %p[0x%x]\n",
				 priv->base + SMSTPCR(reg), oldval & mask);
	}

	return 0;
}

static const struct dev_pm_ops cpg_mssr_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cpg_mssr_suspend_noirq,
				      cpg_mssr_resume_noirq)
};
#define DEV_PM_OPS	&cpg_mssr_pm
#else
#define DEV_PM_OPS	NULL
#endif
static int __init cpg_mssr_common_init(struct device *dev,
				       struct device_node *np,
				       const struct cpg_mssr_info *info)
{
	struct cpg_mssr_priv *priv;
	unsigned int nclks, i;
	int error;

	if (info->init) {
		error = info->init(dev);
		if (error)
			return error;
	}

	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	priv = kzalloc(struct_size(priv, clks, nclks), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->np = np;
	priv->dev = dev;
	spin_lock_init(&priv->rmw_lock);

	priv->base = of_iomap(np, 0);
	if (!priv->base) {
		error = -ENOMEM;
		goto out_err;
	}

	cpg_mssr_priv = priv;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->last_dt_core_clk = info->last_dt_core_clk;
	RAW_INIT_NOTIFIER_HEAD(&priv->notifiers);
	priv->stbyctrl = info->stbyctrl;

	for (i = 0; i < nclks; i++)
		priv->clks[i] = ERR_PTR(-ENOENT);

	error = of_clk_add_provider(np, cpg_mssr_clk_src_twocell_get, priv);
	if (error)
		goto out_err;

	return 0;

out_err:
	if (priv->base)
		iounmap(priv->base);
	kfree(priv);

	return error;
}

void __init cpg_mssr_early_init(struct device_node *np,
				const struct cpg_mssr_info *info)
{
	int error;
	int i;

	error = cpg_mssr_common_init(NULL, np, info);
	if (error)
		return;

	for (i = 0; i < info->num_early_core_clks; i++)
		cpg_mssr_register_core_clk(&info->early_core_clks[i], info,
					   cpg_mssr_priv);

	for (i = 0; i < info->num_early_mod_clks; i++)
		cpg_mssr_register_mod_clk(&info->early_mod_clks[i], info,
					  cpg_mssr_priv);
}
static int __init cpg_mssr_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct cpg_mssr_info *info;
	struct cpg_mssr_priv *priv;
	unsigned int i;
	int error;

	info = of_device_get_match_data(dev);

	if (!cpg_mssr_priv) {
		error = cpg_mssr_common_init(dev, dev->of_node, info);
		if (error)
			return error;
	}

	priv = cpg_mssr_priv;
	priv->dev = dev;
	dev_set_drvdata(dev, priv);

	for (i = 0; i < info->num_core_clks; i++)
		cpg_mssr_register_core_clk(&info->core_clks[i], info, priv);

	for (i = 0; i < info->num_mod_clks; i++)
		cpg_mssr_register_mod_clk(&info->mod_clks[i], info, priv);

	error = devm_add_action_or_reset(dev,
					 cpg_mssr_del_clk_provider,
					 np);
	if (error)
		return error;

	error = cpg_mssr_add_clk_domain(dev, info->core_pm_clks,
					info->num_core_pm_clks);
	if (error)
		return error;

	/* Reset Controller not supported for Standby Control SoCs */
	if (info->stbyctrl)
		return 0;

	error = cpg_mssr_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}
static struct platform_driver cpg_mssr_driver = {
	.driver = {
		.name = "renesas-cpg-mssr",
		.of_match_table = cpg_mssr_match,
		.pm = DEV_PM_OPS,
	},
};

static int __init cpg_mssr_init(void)
{
	return platform_driver_probe(&cpg_mssr_driver, cpg_mssr_probe);
}

subsys_initcall(cpg_mssr_init);

void __init cpg_core_nullify_range(struct cpg_core_clk *core_clks,
				   unsigned int num_core_clks,
				   unsigned int first_clk,
				   unsigned int last_clk)
{
	unsigned int i;

	for (i = 0; i < num_core_clks; i++)
		if (core_clks[i].id >= first_clk &&
		    core_clks[i].id <= last_clk)
			core_clks[i].name = NULL;
}

void __init mssr_mod_nullify(struct mssr_mod_clk *mod_clks,
			     unsigned int num_mod_clks,
			     const unsigned int *clks, unsigned int n)
{
	unsigned int i, j;

	for (i = 0, j = 0; i < num_mod_clks && j < n; i++)
		if (mod_clks[i].id == clks[j]) {
			mod_clks[i].name = NULL;
			j++;
		}
}

void __init mssr_mod_reparent(struct mssr_mod_clk *mod_clks,
			      unsigned int num_mod_clks,
			      const struct mssr_mod_reparent *clks,
			      unsigned int n)
{
	unsigned int i, j;

	for (i = 0, j = 0; i < num_mod_clks && j < n; i++)
		if (mod_clks[i].id == clks[j].clk) {
			mod_clks[i].parent = clks[j].parent;
			j++;
		}
}

MODULE_DESCRIPTION("Renesas CPG/MSSR Driver");
MODULE_LICENSE("GPL v2");