1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#undef DEBUG
16
17#include <linux/kernel.h>
18#include <linux/export.h>
19#include <linux/list.h>
20#include <linux/errno.h>
21#include <linux/err.h>
22#include <linux/delay.h>
23#include <linux/clk-provider.h>
24#include <linux/io.h>
25#include <linux/bitops.h>
26#include <asm/cpu.h>
27
28#include <trace/events/power.h>
29
30#include "soc.h"
31#include "clockdomain.h"
32#include "clock.h"
33#include "cm.h"
34#include "cm2xxx.h"
35#include "cm3xxx.h"
36#include "cm-regbits-24xx.h"
37#include "cm-regbits-34xx.h"
38#include "common.h"
39
40
41
42
43
/* Maximum number of loop iterations to wait for a module to become enabled */
#define MAX_MODULE_ENABLE_WAIT 100000

/* CPU-type bitmask used to match clock init data against the running SoC */
u16 cpu_mask;

/*
 * SoC-specific clock-feature data (DPLL Fint limits, bypass values, flags);
 * populated at boot by ti_clk_init_features().
 */
struct ti_clk_features ti_clk_features;

/* OMAP3430 DPLL valid input-frequency (Fint) band limits, in Hz */
#define OMAP3430_DPLL_FINT_BAND1_MIN 750000
#define OMAP3430_DPLL_FINT_BAND1_MAX 2100000
#define OMAP3430_DPLL_FINT_BAND2_MIN 7500000
#define OMAP3430_DPLL_FINT_BAND2_MAX 21000000

/* Post-3430 SoCs: single continuous DPLL Fint range, in Hz */
#define OMAP3PLUS_DPLL_FINT_MIN 32000
#define OMAP3PLUS_DPLL_FINT_MAX 52000000

/*
 * clkdm_control: whether the clock framework should also enable/disable
 * clockdomains; cleared by omap2_clk_disable_clkdm_control().
 */
static bool clkdm_control = true;

/* List of all registered clk_hw_omap clocks (see omap2_init_clk_hw_omap_clocks) */
static LIST_HEAD(clk_hw_omap_clocks);
/* Base virtual addresses for memmap-indexed clock registers (MEMMAP_ADDRESSING) */
void __iomem *clk_memmaps[CLK_MAX_MEMMAPS];
76
77void omap2_clk_writel(u32 val, struct clk_hw_omap *clk, void __iomem *reg)
78{
79 if (clk->flags & MEMMAP_ADDRESSING) {
80 struct clk_omap_reg *r = (struct clk_omap_reg *)®
81 writel_relaxed(val, clk_memmaps[r->index] + r->offset);
82 } else {
83 writel_relaxed(val, reg);
84 }
85}
86
87u32 omap2_clk_readl(struct clk_hw_omap *clk, void __iomem *reg)
88{
89 u32 val;
90
91 if (clk->flags & MEMMAP_ADDRESSING) {
92 struct clk_omap_reg *r = (struct clk_omap_reg *)®
93 val = readl_relaxed(clk_memmaps[r->index] + r->offset);
94 } else {
95 val = readl_relaxed(reg);
96 }
97
98 return val;
99}
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122static int _wait_idlest_generic(struct clk_hw_omap *clk, void __iomem *reg,
123 u32 mask, u8 idlest, const char *name)
124{
125 int i = 0, ena = 0;
126
127 ena = (idlest) ? 0 : mask;
128
129 omap_test_timeout(((omap2_clk_readl(clk, reg) & mask) == ena),
130 MAX_MODULE_ENABLE_WAIT, i);
131
132 if (i < MAX_MODULE_ENABLE_WAIT)
133 pr_debug("omap clock: module associated with clock %s ready after %d loops\n",
134 name, i);
135 else
136 pr_err("omap clock: module associated with clock %s didn't enable in %d tries\n",
137 name, MAX_MODULE_ENABLE_WAIT);
138
139 return (i < MAX_MODULE_ENABLE_WAIT) ? 1 : 0;
140};
141
142
143
144
145
146
147
148
149
150
151
152static void _omap2_module_wait_ready(struct clk_hw_omap *clk)
153{
154 void __iomem *companion_reg, *idlest_reg;
155 u8 other_bit, idlest_bit, idlest_val, idlest_reg_id;
156 s16 prcm_mod;
157 int r;
158
159
160 if (clk->ops->find_companion) {
161 clk->ops->find_companion(clk, &companion_reg, &other_bit);
162 if (!(omap2_clk_readl(clk, companion_reg) & (1 << other_bit)))
163 return;
164 }
165
166 clk->ops->find_idlest(clk, &idlest_reg, &idlest_bit, &idlest_val);
167 r = cm_split_idlest_reg(idlest_reg, &prcm_mod, &idlest_reg_id);
168 if (r) {
169
170 _wait_idlest_generic(clk, idlest_reg, (1 << idlest_bit),
171 idlest_val, __clk_get_name(clk->hw.clk));
172 } else {
173 omap_cm_wait_module_ready(0, prcm_mod, idlest_reg_id,
174 idlest_bit);
175 };
176}
177
178
179
180
181
182
183
184
185
186
187
188void omap2_init_clk_clkdm(struct clk_hw *hw)
189{
190 struct clk_hw_omap *clk = to_clk_hw_omap(hw);
191 struct clockdomain *clkdm;
192 const char *clk_name;
193
194 if (!clk->clkdm_name)
195 return;
196
197 clk_name = __clk_get_name(hw->clk);
198
199 clkdm = clkdm_lookup(clk->clkdm_name);
200 if (clkdm) {
201 pr_debug("clock: associated clk %s to clkdm %s\n",
202 clk_name, clk->clkdm_name);
203 clk->clkdm = clkdm;
204 } else {
205 pr_debug("clock: could not associate clk %s to clkdm %s\n",
206 clk_name, clk->clkdm_name);
207 }
208}
209
210
211
212
213
214
215
216
217
/**
 * omap2_clk_disable_clkdm_control - stop the clock code from touching clkdms
 *
 * Clears the file-scope clkdm_control flag so that subsequent clock
 * enable/disable operations skip clkdm_clk_enable()/clkdm_clk_disable().
 * __init: intended to be called once during boot.  No return value.
 */
void __init omap2_clk_disable_clkdm_control(void)
{
	clkdm_control = false;
}
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk,
245 void __iomem **other_reg, u8 *other_bit)
246{
247 u32 r;
248
249
250
251
252
253 r = ((__force u32)clk->enable_reg ^ (CM_FCLKEN ^ CM_ICLKEN));
254
255 *other_reg = (__force void __iomem *)r;
256 *other_bit = clk->enable_bit;
257}
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk,
274 void __iomem **idlest_reg, u8 *idlest_bit, u8 *idlest_val)
275{
276 u32 r;
277
278 r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
279 *idlest_reg = (__force void __iomem *)r;
280 *idlest_bit = clk->enable_bit;
281
282
283
284
285
286
287 *idlest_val = ti_clk_features.cm_idlest_val;
288}
289
290
291
292
293
294
295
296
297
298
299
300
301
/**
 * omap2_dflt_clk_enable - enable a clock in the hardware
 * @hw: struct clk_hw * of the clock to enable
 *
 * Enable the clock @hw in the hardware.  First "enable" the
 * corresponding clockdomain (unless clkdm control is disabled), then
 * set (or, for INVERT_ENABLE clocks, clear) the enable bit in the
 * clock's enable register, and finally wait for the associated IP
 * module to leave idle if the clock's ops provide find_idlest.
 *
 * Returns 0 on success, the clkdm_clk_enable() error code if the
 * clockdomain could not be enabled, or -EINVAL if the clock has no
 * enable register (in which case the clockdomain enable is undone).
 */
int omap2_dflt_clk_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk;
	u32 v;
	int ret = 0;

	clk = to_clk_hw_omap(hw);

	if (clkdm_control && clk->clkdm) {
		ret = clkdm_clk_enable(clk->clkdm, hw->clk);
		if (ret) {
			WARN(1, "%s: could not enable %s's clockdomain %s: %d\n",
			     __func__, __clk_get_name(hw->clk),
			     clk->clkdm->name, ret);
			return ret;
		}
	}

	if (unlikely(clk->enable_reg == NULL)) {
		pr_err("%s: %s missing enable_reg\n", __func__,
		       __clk_get_name(hw->clk));
		ret = -EINVAL;
		goto err;
	}

	/* INVERT_ENABLE clocks are enabled by clearing their enable bit */
	v = omap2_clk_readl(clk, clk->enable_reg);
	if (clk->flags & INVERT_ENABLE)
		v &= ~(1 << clk->enable_bit);
	else
		v |= (1 << clk->enable_bit);
	omap2_clk_writel(v, clk, clk->enable_reg);
	/* read back to flush the posted write (presumably an OCP barrier) */
	v = omap2_clk_readl(clk, clk->enable_reg);

	if (clk->ops && clk->ops->find_idlest)
		_omap2_module_wait_ready(clk);

	return 0;

err:
	/* undo the clockdomain enable performed above */
	if (clkdm_control && clk->clkdm)
		clkdm_clk_disable(clk->clkdm, hw->clk);
	return ret;
}
346
347
348
349
350
351
352
353
354
355
/**
 * omap2_dflt_clk_disable - disable a clock in the hardware
 * @hw: struct clk_hw * of the clock to disable
 *
 * Disable the clock @hw in the hardware: clear (or, for INVERT_ENABLE
 * clocks, set) the enable bit in the clock's enable register, then
 * "disable" the corresponding clockdomain unless clkdm control is
 * disabled.  Clocks without an enable register are logged and left
 * untouched.  No return value.
 */
void omap2_dflt_clk_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk;
	u32 v;

	clk = to_clk_hw_omap(hw);
	if (!clk->enable_reg) {
		/*
		 * 'independent' clock with no gate register — nothing
		 * to do in hardware; report it since callers should
		 * not reach here for such clocks.
		 */
		pr_err("%s: independent clock %s has no enable_reg\n",
		       __func__, __clk_get_name(hw->clk));
		return;
	}

	/* INVERT_ENABLE clocks are disabled by setting their enable bit */
	v = omap2_clk_readl(clk, clk->enable_reg);
	if (clk->flags & INVERT_ENABLE)
		v |= (1 << clk->enable_bit);
	else
		v &= ~(1 << clk->enable_bit);
	omap2_clk_writel(v, clk, clk->enable_reg);
	/* No OCP barrier needed here since the module is being disabled */

	if (clkdm_control && clk->clkdm)
		clkdm_clk_disable(clk->clkdm, hw->clk);
}
383
384
385
386
387
388
389
390
391
392
393
394
395
396int omap2_clkops_enable_clkdm(struct clk_hw *hw)
397{
398 struct clk_hw_omap *clk;
399 int ret = 0;
400
401 clk = to_clk_hw_omap(hw);
402
403 if (unlikely(!clk->clkdm)) {
404 pr_err("%s: %s: no clkdm set ?!\n", __func__,
405 __clk_get_name(hw->clk));
406 return -EINVAL;
407 }
408
409 if (unlikely(clk->enable_reg))
410 pr_err("%s: %s: should use dflt_clk_enable ?!\n", __func__,
411 __clk_get_name(hw->clk));
412
413 if (!clkdm_control) {
414 pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n",
415 __func__, __clk_get_name(hw->clk));
416 return 0;
417 }
418
419 ret = clkdm_clk_enable(clk->clkdm, hw->clk);
420 WARN(ret, "%s: could not enable %s's clockdomain %s: %d\n",
421 __func__, __clk_get_name(hw->clk), clk->clkdm->name, ret);
422
423 return ret;
424}
425
426
427
428
429
430
431
432
433
434
435void omap2_clkops_disable_clkdm(struct clk_hw *hw)
436{
437 struct clk_hw_omap *clk;
438
439 clk = to_clk_hw_omap(hw);
440
441 if (unlikely(!clk->clkdm)) {
442 pr_err("%s: %s: no clkdm set ?!\n", __func__,
443 __clk_get_name(hw->clk));
444 return;
445 }
446
447 if (unlikely(clk->enable_reg))
448 pr_err("%s: %s: should use dflt_clk_disable ?!\n", __func__,
449 __clk_get_name(hw->clk));
450
451 if (!clkdm_control) {
452 pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n",
453 __func__, __clk_get_name(hw->clk));
454 return;
455 }
456
457 clkdm_clk_disable(clk->clkdm, hw->clk);
458}
459
460
461
462
463
464
465
466
467
468int omap2_dflt_clk_is_enabled(struct clk_hw *hw)
469{
470 struct clk_hw_omap *clk = to_clk_hw_omap(hw);
471 u32 v;
472
473 v = omap2_clk_readl(clk, clk->enable_reg);
474
475 if (clk->flags & INVERT_ENABLE)
476 v ^= BIT(clk->enable_bit);
477
478 v &= BIT(clk->enable_bit);
479
480 return v ? 1 : 0;
481}
482
483static int __initdata mpurate;
484
485
486
487
488
489static int __init omap_clk_setup(char *str)
490{
491 get_option(&str, &mpurate);
492
493 if (!mpurate)
494 return 1;
495
496 if (mpurate < 1000)
497 mpurate *= 1000000;
498
499 return 1;
500}
501__setup("mpurate=", omap_clk_setup);
502
503
504
505
506
507
508
509
510
511
512void omap2_init_clk_hw_omap_clocks(struct clk *clk)
513{
514 struct clk_hw_omap *c;
515
516 if (__clk_get_flags(clk) & CLK_IS_BASIC)
517 return;
518
519 c = to_clk_hw_omap(__clk_get_hw(clk));
520 list_add(&c->node, &clk_hw_omap_clocks);
521}
522
523
524
525
526
527
528
529
530
531
532int omap2_clk_enable_autoidle_all(void)
533{
534 struct clk_hw_omap *c;
535
536 list_for_each_entry(c, &clk_hw_omap_clocks, node)
537 if (c->ops && c->ops->allow_idle)
538 c->ops->allow_idle(c);
539
540 of_ti_clk_allow_autoidle_all();
541
542 return 0;
543}
544
545
546
547
548
549
550
551
552
553
554int omap2_clk_disable_autoidle_all(void)
555{
556 struct clk_hw_omap *c;
557
558 list_for_each_entry(c, &clk_hw_omap_clocks, node)
559 if (c->ops && c->ops->deny_idle)
560 c->ops->deny_idle(c);
561
562 of_ti_clk_deny_autoidle_all();
563
564 return 0;
565}
566
567
568
569
570
571
572
573int omap2_clk_deny_idle(struct clk *clk)
574{
575 struct clk_hw_omap *c;
576
577 if (__clk_get_flags(clk) & CLK_IS_BASIC)
578 return -EINVAL;
579
580 c = to_clk_hw_omap(__clk_get_hw(clk));
581 if (c->ops && c->ops->deny_idle)
582 c->ops->deny_idle(c);
583 return 0;
584}
585
586
587
588
589
590
591
592int omap2_clk_allow_idle(struct clk *clk)
593{
594 struct clk_hw_omap *c;
595
596 if (__clk_get_flags(clk) & CLK_IS_BASIC)
597 return -EINVAL;
598
599 c = to_clk_hw_omap(__clk_get_hw(clk));
600 if (c->ops && c->ops->allow_idle)
601 c->ops->allow_idle(c);
602 return 0;
603}
604
605
606
607
608
609
610
611
612
613
614
615void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks)
616{
617 struct clk *init_clk;
618 int i;
619
620 for (i = 0; i < num_clocks; i++) {
621 init_clk = clk_get(NULL, clk_names[i]);
622 if (WARN(IS_ERR(init_clk), "could not find init clock %s\n",
623 clk_names[i]))
624 continue;
625 clk_prepare_enable(init_clk);
626 }
627}
628
/*
 * Default hw-ops for clocks whose enable must wait for the module to
 * become ready: use the default IDLEST / companion-register derivation.
 */
const struct clk_hw_omap_ops clkhwops_wait = {
	.find_idlest = omap2_clk_dflt_find_idlest,
	.find_companion = omap2_clk_dflt_find_companion,
};
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649int __init omap2_clk_switch_mpurate_at_boot(const char *mpurate_ck_name)
650{
651 struct clk *mpurate_ck;
652 int r;
653
654 if (!mpurate)
655 return -EINVAL;
656
657 mpurate_ck = clk_get(NULL, mpurate_ck_name);
658 if (WARN(IS_ERR(mpurate_ck), "Failed to get %s.\n", mpurate_ck_name))
659 return -ENOENT;
660
661 r = clk_set_rate(mpurate_ck, mpurate);
662 if (r < 0) {
663 WARN(1, "clock: %s: unable to set MPU rate to %d: %d\n",
664 mpurate_ck_name, mpurate, r);
665 clk_put(mpurate_ck);
666 return -EINVAL;
667 }
668
669 calibrate_delay();
670 clk_put(mpurate_ck);
671
672 return 0;
673}
674
675
676
677
678
679
680
681
682
683
684
685
686
687void __init omap2_clk_print_new_rates(const char *hfclkin_ck_name,
688 const char *core_ck_name,
689 const char *mpu_ck_name)
690{
691 struct clk *hfclkin_ck, *core_ck, *mpu_ck;
692 unsigned long hfclkin_rate;
693
694 mpu_ck = clk_get(NULL, mpu_ck_name);
695 if (WARN(IS_ERR(mpu_ck), "clock: failed to get %s.\n", mpu_ck_name))
696 return;
697
698 core_ck = clk_get(NULL, core_ck_name);
699 if (WARN(IS_ERR(core_ck), "clock: failed to get %s.\n", core_ck_name))
700 return;
701
702 hfclkin_ck = clk_get(NULL, hfclkin_ck_name);
703 if (WARN(IS_ERR(hfclkin_ck), "Failed to get %s.\n", hfclkin_ck_name))
704 return;
705
706 hfclkin_rate = clk_get_rate(hfclkin_ck);
707
708 pr_info("Switched to new clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n",
709 (hfclkin_rate / 1000000), ((hfclkin_rate / 100000) % 10),
710 (clk_get_rate(core_ck) / 1000000),
711 (clk_get_rate(mpu_ck) / 1000000));
712}
713
714
715
716
717
718
/**
 * ti_clk_init_features - init clock features struct for the SoC
 *
 * Initialize the file-scope ti_clk_features struct based on the
 * running SoC: DPLL input-frequency (Fint) limits, valid DPLL bypass
 * mode values, the CM_IDLEST "ready" polarity, and feature flags.
 * Intended to be called once during early boot.  No return value.
 */
void __init ti_clk_init_features(void)
{
	/* Fint limits: 3430 has two valid bands; later SoCs one range */
	if (cpu_is_omap3430()) {
		ti_clk_features.fint_min = OMAP3430_DPLL_FINT_BAND1_MIN;
		ti_clk_features.fint_max = OMAP3430_DPLL_FINT_BAND2_MAX;
		ti_clk_features.fint_band1_max = OMAP3430_DPLL_FINT_BAND1_MAX;
		ti_clk_features.fint_band2_min = OMAP3430_DPLL_FINT_BAND2_MIN;
	} else {
		ti_clk_features.fint_min = OMAP3PLUS_DPLL_FINT_MIN;
		ti_clk_features.fint_max = OMAP3PLUS_DPLL_FINT_MAX;
	}

	/* Bitmask of DPLL register values that put the DPLL in bypass */
	if (cpu_is_omap24xx()) {
		ti_clk_features.dpll_bypass_vals |=
			(1 << OMAP2XXX_EN_DPLL_LPBYPASS) |
			(1 << OMAP2XXX_EN_DPLL_FRBYPASS);
	} else if (cpu_is_omap34xx()) {
		ti_clk_features.dpll_bypass_vals |=
			(1 << OMAP3XXX_EN_DPLL_LPBYPASS) |
			(1 << OMAP3XXX_EN_DPLL_FRBYPASS);
	} else if (soc_is_am33xx() || cpu_is_omap44xx() || soc_is_am43xx() ||
		   soc_is_omap54xx() || soc_is_dra7xx()) {
		ti_clk_features.dpll_bypass_vals |=
			(1 << OMAP4XXX_EN_DPLL_LPBYPASS) |
			(1 << OMAP4XXX_EN_DPLL_FRBYPASS) |
			(1 << OMAP4XXX_EN_DPLL_MNBYPASS);
	}

	/* 343x DPLLs support the FREQSEL field */
	if (cpu_is_omap343x())
		ti_clk_features.flags |= TI_CLK_DPLL_HAS_FREQSEL;

	/*
	 * CM_IDLEST "module ready" polarity differs between 24xx and
	 * 34xx; consumed by omap2_clk_dflt_find_idlest().
	 */
	if (cpu_is_omap24xx())
		ti_clk_features.cm_idlest_val = OMAP24XX_CM_IDLEST_VAL;
	else if (cpu_is_omap34xx())
		ti_clk_features.cm_idlest_val = OMAP34XX_CM_IDLEST_VAL;

	/* 3430 ES1.0: DPLL4 must not be reprogrammed after boot */
	if (omap_rev() == OMAP3430_REV_ES1_0)
		ti_clk_features.flags |= TI_CLK_DPLL4_DENY_REPROGRAM;
}
767