1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21#include <linux/kernel.h>
22#include <linux/device.h>
23#include <linux/list.h>
24#include <linux/errno.h>
25#include <linux/delay.h>
26#include <linux/clk.h>
27#include <linux/io.h>
28#include <linux/bitops.h>
29#include <linux/clkdev.h>
30#include <linux/clk/ti.h>
31
32#include "clock.h"
33
34
35#define DPLL_AUTOIDLE_DISABLE 0x0
36#define DPLL_AUTOIDLE_LOW_POWER_STOP 0x1
37
38#define MAX_DPLL_WAIT_TRIES 1000000
39
40#define OMAP3XXX_EN_DPLL_LOCKED 0x7
41
42
43static u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk);
44static void omap3_dpll_deny_idle(struct clk_hw_omap *clk);
45static void omap3_dpll_allow_idle(struct clk_hw_omap *clk);
46
47
48
49
50static void _omap3_dpll_write_clken(struct clk_hw_omap *clk, u8 clken_bits)
51{
52 const struct dpll_data *dd;
53 u32 v;
54
55 dd = clk->dpll_data;
56
57 v = ti_clk_ll_ops->clk_readl(dd->control_reg);
58 v &= ~dd->enable_mask;
59 v |= clken_bits << __ffs(dd->enable_mask);
60 ti_clk_ll_ops->clk_writel(v, dd->control_reg);
61}
62
63
/*
 * _omap3_wait_dpll_status - wait for a DPLL to enter a specific state
 * @clk: DPLL struct clk_hw_omap to poll
 * @state: idlest value to wait for (0 = bypassed, 1 = locked)
 *
 * Busy-waits (udelay(1) per iteration, up to MAX_DPLL_WAIT_TRIES) until
 * the DPLL's IDLEST field matches @state.  Returns 0 once the state is
 * reached, or -EINVAL if the DPLL never made the transition.
 */
static int _omap3_wait_dpll_status(struct clk_hw_omap *clk, u8 state)
{
	const struct dpll_data *dd;
	int i = 0;
	int ret = -EINVAL;
	const char *clk_name;

	dd = clk->dpll_data;
	clk_name = clk_hw_get_name(&clk->hw);

	/* Shift the raw 0/1 value into the IDLEST field position */
	state <<= __ffs(dd->idlest_mask);

	while (((ti_clk_ll_ops->clk_readl(dd->idlest_reg) & dd->idlest_mask)
		!= state) && i < MAX_DPLL_WAIT_TRIES) {
		i++;
		udelay(1);
	}

	if (i == MAX_DPLL_WAIT_TRIES) {
		pr_err("clock: %s failed transition to '%s'\n",
		       clk_name, (state) ? "locked" : "bypassed");
	} else {
		pr_debug("clock: %s transition to '%s' in %d loops\n",
			 clk_name, (state) ? "locked" : "bypassed", i);

		ret = 0;
	}

	return ret;
}
94
95
96static u16 _omap3_dpll_compute_freqsel(struct clk_hw_omap *clk, u8 n)
97{
98 unsigned long fint;
99 u16 f = 0;
100
101 fint = clk_hw_get_rate(clk->dpll_data->clk_ref) / n;
102
103 pr_debug("clock: fint is %lu\n", fint);
104
105 if (fint >= 750000 && fint <= 1000000)
106 f = 0x3;
107 else if (fint > 1000000 && fint <= 1250000)
108 f = 0x4;
109 else if (fint > 1250000 && fint <= 1500000)
110 f = 0x5;
111 else if (fint > 1500000 && fint <= 1750000)
112 f = 0x6;
113 else if (fint > 1750000 && fint <= 2100000)
114 f = 0x7;
115 else if (fint > 7500000 && fint <= 10000000)
116 f = 0xB;
117 else if (fint > 10000000 && fint <= 12500000)
118 f = 0xC;
119 else if (fint > 12500000 && fint <= 15000000)
120 f = 0xD;
121 else if (fint > 15000000 && fint <= 17500000)
122 f = 0xE;
123 else if (fint > 17500000 && fint <= 21000000)
124 f = 0xF;
125 else
126 pr_debug("clock: unknown freqsel setting for %d\n", n);
127
128 return f;
129}
130
131
132
133
134
135
136
137
138
139
140
/*
 * _omap3_noncore_dpll_lock - instruct a DPLL to lock and wait for readiness
 * @clk: pointer to a DPLL struct clk_hw_omap
 *
 * Instructs a non-CORE DPLL to lock and waits for it to report locked
 * status.  Autoidle is temporarily disabled around the transition so it
 * cannot interfere with the lock sequence.  Returns 0 on success
 * (including the already-locked fast path), or the wait helper's
 * -EINVAL on timeout.
 */
static int _omap3_noncore_dpll_lock(struct clk_hw_omap *clk)
{
	const struct dpll_data *dd;
	u8 ai;
	u8 state = 1;
	int r = 0;

	pr_debug("clock: locking DPLL %s\n", clk_hw_get_name(&clk->hw));

	dd = clk->dpll_data;
	state <<= __ffs(dd->idlest_mask);

	/* Check if already locked */
	if ((ti_clk_ll_ops->clk_readl(dd->idlest_reg) & dd->idlest_mask) ==
	    state)
		goto done;

	ai = omap3_dpll_autoidle_read(clk);

	/* Keep autoidle from racing with the lock transition */
	if (ai)
		omap3_dpll_deny_idle(clk);

	_omap3_dpll_write_clken(clk, DPLL_LOCKED);

	r = _omap3_wait_dpll_status(clk, 1);

	/* Restore autoidle only if it was enabled on entry */
	if (ai)
		omap3_dpll_allow_idle(clk);

done:
	return r;
}
173
174
175
176
177
178
179
180
181
182
183
184
185
186
/*
 * _omap3_noncore_dpll_bypass - put a DPLL into low-power bypass mode
 * @clk: pointer to a DPLL struct clk_hw_omap
 *
 * Instructs a non-CORE DPLL to enter low-power bypass mode and waits
 * for the transition to complete.  Returns -EINVAL if the DPLL does
 * not support low-power bypass, otherwise the result of the status
 * wait.  Note: unlike the lock path, autoidle is only read (and
 * restored afterwards if it was on), not denied around the transition.
 */
static int _omap3_noncore_dpll_bypass(struct clk_hw_omap *clk)
{
	int r;
	u8 ai;

	if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS)))
		return -EINVAL;

	pr_debug("clock: configuring DPLL %s for low-power bypass\n",
		 clk_hw_get_name(&clk->hw));

	ai = omap3_dpll_autoidle_read(clk);

	_omap3_dpll_write_clken(clk, DPLL_LOW_POWER_BYPASS);

	r = _omap3_wait_dpll_status(clk, 0);

	if (ai)
		omap3_dpll_allow_idle(clk);

	return r;
}
209
210
211
212
213
214
215
216
217
218
219static int _omap3_noncore_dpll_stop(struct clk_hw_omap *clk)
220{
221 u8 ai;
222
223 if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_STOP)))
224 return -EINVAL;
225
226 pr_debug("clock: stopping DPLL %s\n", clk_hw_get_name(&clk->hw));
227
228 ai = omap3_dpll_autoidle_read(clk);
229
230 _omap3_dpll_write_clken(clk, DPLL_LOW_POWER_STOP);
231
232 if (ai)
233 omap3_dpll_allow_idle(clk);
234
235 return 0;
236}
237
238
239
240
241
242
243
244
245
246
247
248
249
250static void _lookup_dco(struct clk_hw_omap *clk, u8 *dco, u16 m, u8 n)
251{
252 unsigned long fint, clkinp;
253
254 clkinp = clk_hw_get_rate(clk_hw_get_parent(&clk->hw));
255 fint = (clkinp / n) * m;
256
257 if (fint < 1000000000)
258 *dco = 2;
259 else
260 *dco = 4;
261}
262
263
264
265
266
267
268
269
270
271
272
273
274
/*
 * _lookup_sddiv - compute the sigma-delta divider field for a DPLL
 * @clk: DPLL being programmed
 * @sd_div: output - SD divider field value
 * @m: DPLL multiplier
 * @n: DPLL divider
 *
 * Computes a rounded-up divider in fixed point: the parent rate is
 * scaled to 100 kHz units, divided by (250 * n) and then by 10, and
 * the result is bumped by one if either division left a remainder
 * (i.e. ceil semantics).  NOTE(review): the 250/10 constants presumably
 * target a 250 MHz sigma-delta clock — confirm against the TRM.
 */
static void _lookup_sddiv(struct clk_hw_omap *clk, u8 *sd_div, u16 m, u8 n)
{
	unsigned long clkinp, sd;
	int mod1, mod2;

	clkinp = clk_hw_get_rate(clk_hw_get_parent(&clk->hw));

	/* Scale to 100 kHz units to keep the math in unsigned long range */
	clkinp /= 100000;
	mod1 = (clkinp * m) % (250 * n);
	sd = (clkinp * m) / (250 * n);
	mod2 = sd % 10;
	sd /= 10;

	/* Round up if either stage of the division truncated */
	if (mod1 || mod2)
		sd++;
	*sd_div = sd;
}
296
297
298
299
300
301
302
303
304
/*
 * omap3_noncore_dpll_program - set non-core DPLL M,N values directly
 * @clk: struct clk_hw_omap * of DPLL to set
 * @freqsel: FREQSEL value to set (only used when the SoC has FREQSEL)
 *
 * Programs the DPLL with the last-rounded M/N values plus the optional
 * DCC, DCO and SD-divider fields, then relocks it.  The DPLL is put
 * into bypass while its dividers are rewritten.  Always returns 0;
 * errors from the bypass/lock steps are not propagated.
 */
static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel)
{
	struct dpll_data *dd = clk->dpll_data;
	u8 dco, sd_div, ai = 0;
	u32 v;
	bool errata_i810;

	/* Put the DPLL into low-power bypass before touching dividers */
	_omap3_noncore_dpll_bypass(clk);

	/* Set jitter correction (FREQSEL) where the hardware supports it */
	if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) {
		v = ti_clk_ll_ops->clk_readl(dd->control_reg);
		v &= ~dd->freqsel_mask;
		v |= freqsel << __ffs(dd->freqsel_mask);
		ti_clk_ll_ops->clk_writel(v, dd->control_reg);
	}

	/* Set DPLL multiplier, divider */
	v = ti_clk_ll_ops->clk_readl(dd->mult_div1_reg);

	/* Duty-cycle correction: enabled at/above dcc_rate, off below */
	if (dd->dcc_mask) {
		if (dd->last_rounded_rate >= dd->dcc_rate)
			v |= dd->dcc_mask;
		else
			v &= ~dd->dcc_mask;
	}

	v &= ~(dd->mult_mask | dd->div1_mask);
	v |= dd->last_rounded_m << __ffs(dd->mult_mask);
	/* Register field holds N-1 */
	v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);

	/* Configure DCO and SD divider for DPLLs that have these fields */
	if (dd->dco_mask) {
		_lookup_dco(clk, &dco, dd->last_rounded_m, dd->last_rounded_n);
		v &= ~(dd->dco_mask);
		v |= dco << __ffs(dd->dco_mask);
	}
	if (dd->sddiv_mask) {
		_lookup_sddiv(clk, &sd_div, dd->last_rounded_m,
			      dd->last_rounded_n);
		v &= ~(dd->sddiv_mask);
		v |= sd_div << __ffs(dd->sddiv_mask);
	}

	/*
	 * Errata i810 workaround: deny autoidle before writing M/N so the
	 * DPLL cannot start an idle transition mid-update, and read the
	 * autoidle register back as a barrier to flush the deny write.
	 */
	errata_i810 = ti_clk_get_features()->flags & TI_CLK_ERRATA_I810;

	if (errata_i810) {
		ai = omap3_dpll_autoidle_read(clk);
		if (ai) {
			omap3_dpll_deny_idle(clk);

			/* Read back to ensure the write has landed */
			omap3_dpll_autoidle_read(clk);
		}
	}

	ti_clk_ll_ops->clk_writel(v, dd->mult_div1_reg);

	/* Set 4X multiplier and low-power mode where supported */
	if (dd->m4xen_mask || dd->lpmode_mask) {
		v = ti_clk_ll_ops->clk_readl(dd->control_reg);

		if (dd->m4xen_mask) {
			if (dd->last_rounded_m4xen)
				v |= dd->m4xen_mask;
			else
				v &= ~dd->m4xen_mask;
		}

		if (dd->lpmode_mask) {
			if (dd->last_rounded_lpmode)
				v |= dd->lpmode_mask;
			else
				v &= ~dd->lpmode_mask;
		}

		ti_clk_ll_ops->clk_writel(v, dd->control_reg);
	}

	/* Relock with the new settings */
	_omap3_noncore_dpll_lock(clk);

	/* Restore autoidle if the errata path disabled it above */
	if (errata_i810 && ai)
		omap3_dpll_allow_idle(clk);

	return 0;
}
407
408
409
410
411
412
413
414
415
/*
 * omap3_dpll_recalc - recalculate DPLL rate
 * @hw: struct clk_hw for the DPLL
 * @parent_rate: parent clock rate (unused; the helper reads the
 *               registers directly)
 *
 * Recalculate and return the DPLL's current rate.
 */
unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate)
{
	return omap2_get_dpll_rate(to_clk_hw_omap(hw));
}
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
/*
 * omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode
 * @hw: struct clk_hw of the DPLL to enable
 *
 * Enables the DPLL's clockdomain (if any), then either bypasses the
 * DPLL (when its current rate equals the bypass clock rate) or locks
 * it.  Returns 0 on success or a negative error code; a clockdomain
 * enable failure is propagated with a WARN.
 */
int omap3_noncore_dpll_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	int r;
	struct dpll_data *dd;
	struct clk_hw *parent;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (clk->clkdm) {
		r = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
		if (r) {
			WARN(1,
			     "%s: could not enable %s's clockdomain %s: %d\n",
			     __func__, clk_hw_get_name(hw),
			     clk->clkdm_name, r);
			return r;
		}
	}

	parent = clk_hw_get_parent(hw);

	/* Rate matching the bypass clock implies bypass mode is wanted */
	if (clk_hw_get_rate(hw) == clk_hw_get_rate(dd->clk_bypass)) {
		WARN_ON(parent != dd->clk_bypass);
		r = _omap3_noncore_dpll_bypass(clk);
	} else {
		WARN_ON(parent != dd->clk_ref);
		r = _omap3_noncore_dpll_lock(clk);
	}

	return r;
}
473
474
475
476
477
478
479
480
481void omap3_noncore_dpll_disable(struct clk_hw *hw)
482{
483 struct clk_hw_omap *clk = to_clk_hw_omap(hw);
484
485 _omap3_noncore_dpll_stop(clk);
486 if (clk->clkdm)
487 ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
488}
489
490
491
492
493
494
495
496
497
498
499
500
501
/*
 * omap3_noncore_dpll_determine_rate - determine rate and parent for a DPLL
 * @hw: struct clk_hw of the DPLL
 * @req: clk rate request to fill in
 *
 * If the requested rate equals the bypass clock rate and the DPLL
 * supports low-power bypass, select the bypass parent; otherwise round
 * the rate via omap2_dpll_round_rate() and select the reference
 * parent.  best_parent_rate is then unconditionally set to the
 * (possibly rounded) target rate, overriding whatever the round-rate
 * helper wrote.  Returns -EINVAL on a zero rate or missing dpll_data,
 * 0 otherwise.
 */
int omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *dd;

	if (!req->rate)
		return -EINVAL;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (clk_hw_get_rate(dd->clk_bypass) == req->rate &&
	    (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
		req->best_parent_hw = dd->clk_bypass;
	} else {
		req->rate = omap2_dpll_round_rate(hw, req->rate,
						  &req->best_parent_rate);
		req->best_parent_hw = dd->clk_ref;
	}

	req->best_parent_rate = req->rate;

	return 0;
}
528
529
530
531
532
533
534
535
536
537int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index)
538{
539 struct clk_hw_omap *clk = to_clk_hw_omap(hw);
540 int ret;
541
542 if (!hw)
543 return -EINVAL;
544
545 if (index)
546 ret = _omap3_noncore_dpll_bypass(clk);
547 else
548 ret = _omap3_noncore_dpll_lock(clk);
549
550 return ret;
551}
552
553
554
555
556
557
558
559
560
561
562
563
564int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
565 unsigned long parent_rate)
566{
567 struct clk_hw_omap *clk = to_clk_hw_omap(hw);
568 struct dpll_data *dd;
569 u16 freqsel = 0;
570 int ret;
571
572 if (!hw || !rate)
573 return -EINVAL;
574
575 dd = clk->dpll_data;
576 if (!dd)
577 return -EINVAL;
578
579 if (clk_hw_get_parent(hw) != dd->clk_ref)
580 return -EINVAL;
581
582 if (dd->last_rounded_rate == 0)
583 return -EINVAL;
584
585
586 if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) {
587 freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n);
588 WARN_ON(!freqsel);
589 }
590
591 pr_debug("%s: %s: set rate: locking rate to %lu.\n", __func__,
592 clk_hw_get_name(hw), rate);
593
594 ret = omap3_noncore_dpll_program(clk, freqsel);
595
596 return ret;
597}
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
613 unsigned long rate,
614 unsigned long parent_rate,
615 u8 index)
616{
617 int ret;
618
619 if (!hw || !rate)
620 return -EINVAL;
621
622
623
624
625
626
627 if (index)
628 ret = omap3_noncore_dpll_set_parent(hw, index);
629 else
630 ret = omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
631
632 return ret;
633}
634
635
636
637
638
639
640
641
642
643
644
/*
 * omap3_dpll_autoidle_read - read the autoidle field from a DPLL
 * @clk: struct clk_hw_omap of the DPLL to read
 *
 * Returns the mask-shifted autoidle mode bits of the DPLL.
 * NOTE(review): on error this returns -EINVAL through a u32
 * (0xffffffea); callers in this file only test the result for truth,
 * so the error value behaves like "autoidle enabled/unknown".
 */
static u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk)
{
	const struct dpll_data *dd;
	u32 v;

	if (!clk || !clk->dpll_data)
		return -EINVAL;

	dd = clk->dpll_data;

	/* Some DPLLs have no autoidle register at all */
	if (!dd->autoidle_reg)
		return -EINVAL;

	v = ti_clk_ll_ops->clk_readl(dd->autoidle_reg);
	v &= dd->autoidle_mask;
	v >>= __ffs(dd->autoidle_mask);

	return v;
}
664
665
666
667
668
669
670
671
672
673
/*
 * omap3_dpll_allow_idle - enable DPLL autoidle mode
 * @clk: struct clk_hw_omap of the DPLL to operate on
 *
 * Programs the DPLL's autoidle field to low-power stop.  Silently
 * returns if the DPLL has no autoidle register.  NOTE(review): other
 * autoidle modes may exist in hardware; this driver always selects
 * LOW_POWER_STOP here — confirm against the TRM if another mode is
 * ever needed.
 */
static void omap3_dpll_allow_idle(struct clk_hw_omap *clk)
{
	const struct dpll_data *dd;
	u32 v;

	if (!clk || !clk->dpll_data)
		return;

	dd = clk->dpll_data;

	if (!dd->autoidle_reg)
		return;

	v = ti_clk_ll_ops->clk_readl(dd->autoidle_reg);
	v &= ~dd->autoidle_mask;
	v |= DPLL_AUTOIDLE_LOW_POWER_STOP << __ffs(dd->autoidle_mask);
	ti_clk_ll_ops->clk_writel(v, dd->autoidle_reg);
}
697
698
699
700
701
702
703
704static void omap3_dpll_deny_idle(struct clk_hw_omap *clk)
705{
706 const struct dpll_data *dd;
707 u32 v;
708
709 if (!clk || !clk->dpll_data)
710 return;
711
712 dd = clk->dpll_data;
713
714 if (!dd->autoidle_reg)
715 return;
716
717 v = ti_clk_ll_ops->clk_readl(dd->autoidle_reg);
718 v &= ~dd->autoidle_mask;
719 v |= DPLL_AUTOIDLE_DISABLE << __ffs(dd->autoidle_mask);
720 ti_clk_ll_ops->clk_writel(v, dd->autoidle_reg);
721}
722
723
724
725
/*
 * omap3_find_clkoutx2_dpll - find the parent DPLL of a CLKOUTX2 clock
 * @hw: clk_hw of the clkoutx2 clock
 *
 * Walks up the parent chain, skipping basic (non-OMAP) clocks, until a
 * clk_hw_omap with dpll_data is found.  Returns that clk_hw_omap, or
 * NULL (with a WARN) if no DPLL ancestor exists — which would indicate
 * broken clock data.
 */
static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw)
{
	struct clk_hw_omap *pclk = NULL;

	/* Walk up the parents of clk, looking for a DPLL */
	do {
		do {
			hw = clk_hw_get_parent(hw);
		} while (hw && (clk_hw_get_flags(hw) & CLK_IS_BASIC));
		if (!hw)
			break;
		pclk = to_clk_hw_omap(hw);
	} while (pclk && !pclk->dpll_data);

	/* clk does not have a DPLL as a parent?  error in the clock data */
	if (!pclk) {
		WARN_ON(1);
		return NULL;
	}

	return pclk;
}
748
749
750
751
752
753
754
755
/*
 * omap3_clkoutx2_recalc - recalculate DPLL X2 output rate
 * @hw: pointer to the clock to compute the rate for
 * @parent_rate: rate of the parent DPLL
 *
 * The CLKOUTX2 output runs at twice the parent DPLL rate while the
 * DPLL is locked and is not a J-type DPLL; otherwise it follows the
 * parent rate 1:1.  Returns 0 if @parent_rate is zero or no parent
 * DPLL can be found.
 */
unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
				    unsigned long parent_rate)
{
	const struct dpll_data *dd;
	unsigned long rate;
	u32 v;
	struct clk_hw_omap *pclk = NULL;

	if (!parent_rate)
		return 0;

	pclk = omap3_find_clkoutx2_dpll(hw);

	if (!pclk)
		return 0;

	dd = pclk->dpll_data;

	WARN_ON(!dd->enable_mask);

	/* Read the DPLL mode bits to see whether it is locked */
	v = ti_clk_ll_ops->clk_readl(dd->control_reg) & dd->enable_mask;
	v >>= __ffs(dd->enable_mask);
	if ((v != OMAP3XXX_EN_DPLL_LOCKED) || (dd->flags & DPLL_J_TYPE))
		rate = parent_rate;
	else
		rate = parent_rate * 2;
	return rate;
}
784
785
/* OMAP3 DPLL autoidle hooks plugged into the TI clock framework */
const struct clk_hw_omap_ops clkhwops_omap3_dpll = {
	.allow_idle = omap3_dpll_allow_idle,
	.deny_idle = omap3_dpll_deny_idle,
};
790
791
792
793
794
795
796
797
798
799
800
801
802int omap3_dpll4_set_rate(struct clk_hw *hw, unsigned long rate,
803 unsigned long parent_rate)
804{
805
806
807
808
809
810 if (ti_clk_get_features()->flags & TI_CLK_DPLL4_DENY_REPROGRAM) {
811 pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n");
812 return -EINVAL;
813 }
814
815 return omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
816}
817
818
819
820
821
822
823
824
825
826
827
828
829
830int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
831 unsigned long parent_rate, u8 index)
832{
833 if (ti_clk_get_features()->flags & TI_CLK_DPLL4_DENY_REPROGRAM) {
834 pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n");
835 return -EINVAL;
836 }
837
838 return omap3_noncore_dpll_set_rate_and_parent(hw, rate, parent_rate,
839 index);
840}
841
842
/*
 * omap3_dpll5_apply_errata - program DPLL5 with precomputed M/N values
 * @hw: clk_hw for DPLL5
 * @parent_rate: DPLL5 reference clock rate
 *
 * Looks up @parent_rate in a table of precomputed M/N pairs and, on a
 * match, programs DPLL5 with them directly.  NOTE(review): presumably
 * this is the OMAP36xx DPLL5 jitter erratum workaround (values chosen
 * to hit the USB host frequency exactly) — confirm against the TI
 * erratum document.  Returns true if a precomputed setting was
 * applied, false if @parent_rate is not in the table.
 */
static bool omap3_dpll5_apply_errata(struct clk_hw *hw,
				     unsigned long parent_rate)
{
	struct omap3_dpll5_settings {
		unsigned int rate, m, n;	/* n stored as register value + 1 */
	};

	/* Precomputed M/N per supported reference rate */
	static const struct omap3_dpll5_settings precomputed[] = {
		{ 12000000, 80, 0 + 1 },
		{ 13000000, 443, 5 + 1 },
		{ 19200000, 50, 0 + 1 },
		{ 26000000, 443, 11 + 1 },
		{ 38400000, 25, 0 + 1 }
	};

	const struct omap3_dpll5_settings *d;
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *dd;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(precomputed); ++i) {
		if (parent_rate == precomputed[i].rate)
			break;
	}

	if (i == ARRAY_SIZE(precomputed))
		return false;

	d = &precomputed[i];

	/* Update the dpll to use the precomputed M/N, then program it */
	dd = clk->dpll_data;
	dd->last_rounded_m = d->m;
	dd->last_rounded_n = d->n;
	dd->last_rounded_rate = div_u64((u64)parent_rate * d->m, d->n);
	omap3_noncore_dpll_program(clk, 0);

	return true;
}
888
889
890
891
892
893
894
895
896
897
898int omap3_dpll5_set_rate(struct clk_hw *hw, unsigned long rate,
899 unsigned long parent_rate)
900{
901 if (rate == OMAP3_DPLL5_FREQ_FOR_USBHOST * 8) {
902 if (omap3_dpll5_apply_errata(hw, parent_rate))
903 return 0;
904 }
905
906 return omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
907}
908