1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include <linux/kernel.h>
19#include <linux/device.h>
20#include <linux/list.h>
21#include <linux/errno.h>
22#include <linux/delay.h>
23#include <linux/clk.h>
24#include <linux/io.h>
25#include <linux/bitops.h>
26#include <linux/clkdev.h>
27#include <linux/clk/ti.h>
28
29#include "clock.h"
30
31
32#define DPLL_AUTOIDLE_DISABLE 0x0
33#define DPLL_AUTOIDLE_LOW_POWER_STOP 0x1
34
35#define MAX_DPLL_WAIT_TRIES 1000000
36
37#define OMAP3XXX_EN_DPLL_LOCKED 0x7
38
39
40static u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk);
41static void omap3_dpll_deny_idle(struct clk_hw_omap *clk);
42static void omap3_dpll_allow_idle(struct clk_hw_omap *clk);
43
44
45
46
47static void _omap3_dpll_write_clken(struct clk_hw_omap *clk, u8 clken_bits)
48{
49 const struct dpll_data *dd;
50 u32 v;
51
52 dd = clk->dpll_data;
53
54 v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
55 v &= ~dd->enable_mask;
56 v |= clken_bits << __ffs(dd->enable_mask);
57 ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
58}
59
60
61static int _omap3_wait_dpll_status(struct clk_hw_omap *clk, u8 state)
62{
63 const struct dpll_data *dd;
64 int i = 0;
65 int ret = -EINVAL;
66 const char *clk_name;
67
68 dd = clk->dpll_data;
69 clk_name = clk_hw_get_name(&clk->hw);
70
71 state <<= __ffs(dd->idlest_mask);
72
73 while (((ti_clk_ll_ops->clk_readl(&dd->idlest_reg) & dd->idlest_mask)
74 != state) && i < MAX_DPLL_WAIT_TRIES) {
75 i++;
76 udelay(1);
77 }
78
79 if (i == MAX_DPLL_WAIT_TRIES) {
80 pr_err("clock: %s failed transition to '%s'\n",
81 clk_name, (state) ? "locked" : "bypassed");
82 } else {
83 pr_debug("clock: %s transition to '%s' in %d loops\n",
84 clk_name, (state) ? "locked" : "bypassed", i);
85
86 ret = 0;
87 }
88
89 return ret;
90}
91
92
93static u16 _omap3_dpll_compute_freqsel(struct clk_hw_omap *clk, u8 n)
94{
95 unsigned long fint;
96 u16 f = 0;
97
98 fint = clk_hw_get_rate(clk->dpll_data->clk_ref) / n;
99
100 pr_debug("clock: fint is %lu\n", fint);
101
102 if (fint >= 750000 && fint <= 1000000)
103 f = 0x3;
104 else if (fint > 1000000 && fint <= 1250000)
105 f = 0x4;
106 else if (fint > 1250000 && fint <= 1500000)
107 f = 0x5;
108 else if (fint > 1500000 && fint <= 1750000)
109 f = 0x6;
110 else if (fint > 1750000 && fint <= 2100000)
111 f = 0x7;
112 else if (fint > 7500000 && fint <= 10000000)
113 f = 0xB;
114 else if (fint > 10000000 && fint <= 12500000)
115 f = 0xC;
116 else if (fint > 12500000 && fint <= 15000000)
117 f = 0xD;
118 else if (fint > 15000000 && fint <= 17500000)
119 f = 0xE;
120 else if (fint > 17500000 && fint <= 21000000)
121 f = 0xF;
122 else
123 pr_debug("clock: unknown freqsel setting for %d\n", n);
124
125 return f;
126}
127
128
129
130
131
132
133
134
135
136
137
/*
 * _omap3_noncore_dpll_lock - instruct a DPLL to lock and wait for readiness
 * @clk: pointer to a DPLL struct clk_hw_omap
 *
 * Instructs a non-CORE DPLL to lock.  If the DPLL already reports the
 * locked state in IDLEST, nothing is written.  DPLL autoidle is
 * temporarily disabled around the mode change when it was enabled.
 * Returns 0 on success (or if already locked), otherwise the return
 * value of _omap3_wait_dpll_status().
 */
static int _omap3_noncore_dpll_lock(struct clk_hw_omap *clk)
{
	const struct dpll_data *dd;
	u8 ai;
	u8 state = 1;
	int r = 0;

	pr_debug("clock: locking DPLL %s\n", clk_hw_get_name(&clk->hw));

	dd = clk->dpll_data;
	state <<= __ffs(dd->idlest_mask);

	/* Check if already locked */
	if ((ti_clk_ll_ops->clk_readl(&dd->idlest_reg) & dd->idlest_mask) ==
	    state)
		goto done;

	ai = omap3_dpll_autoidle_read(clk);

	/* Keep the DPLL out of autoidle while relocking it */
	if (ai)
		omap3_dpll_deny_idle(clk);

	_omap3_dpll_write_clken(clk, DPLL_LOCKED);

	r = _omap3_wait_dpll_status(clk, 1);

	/* Restore autoidle if it was enabled before */
	if (ai)
		omap3_dpll_allow_idle(clk);

done:
	return r;
}
170
171
172
173
174
175
176
177
178
179
180
181
182
183
/*
 * _omap3_noncore_dpll_bypass - put a DPLL into low-power bypass mode
 * @clk: pointer to a DPLL struct clk_hw_omap
 *
 * Instructs a non-CORE DPLL to enter low-power bypass, if the DPLL
 * supports it (per dpll_data->modes), and waits for IDLEST to report
 * the bypassed state.  Returns -EINVAL if low-power bypass is not
 * supported, otherwise the return value of _omap3_wait_dpll_status().
 */
static int _omap3_noncore_dpll_bypass(struct clk_hw_omap *clk)
{
	int r;
	u8 ai;

	if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS)))
		return -EINVAL;

	pr_debug("clock: configuring DPLL %s for low-power bypass\n",
		 clk_hw_get_name(&clk->hw));

	ai = omap3_dpll_autoidle_read(clk);

	_omap3_dpll_write_clken(clk, DPLL_LOW_POWER_BYPASS);

	r = _omap3_wait_dpll_status(clk, 0);

	/* Re-enable autoidle if it was enabled before the mode change */
	if (ai)
		omap3_dpll_allow_idle(clk);

	return r;
}
206
207
208
209
210
211
212
213
214
215
/*
 * _omap3_noncore_dpll_stop - instruct a DPLL to stop
 * @clk: pointer to a DPLL struct clk_hw_omap
 *
 * Instructs a non-CORE DPLL to enter low-power stop, if the DPLL
 * supports it (per dpll_data->modes).  Returns -EINVAL if low-power
 * stop is not supported, 0 otherwise.  NOTE(review): unlike the lock
 * and bypass paths, no IDLEST wait is performed here - presumably the
 * DPLL does not report a status transition for stop mode; confirm
 * against the TRM.
 */
static int _omap3_noncore_dpll_stop(struct clk_hw_omap *clk)
{
	u8 ai;

	if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_STOP)))
		return -EINVAL;

	pr_debug("clock: stopping DPLL %s\n", clk_hw_get_name(&clk->hw));

	ai = omap3_dpll_autoidle_read(clk);

	_omap3_dpll_write_clken(clk, DPLL_LOW_POWER_STOP);

	/* Re-enable autoidle if it was enabled before the mode change */
	if (ai)
		omap3_dpll_allow_idle(clk);

	return 0;
}
234
235
236
237
238
239
240
241
242
243
244
245
246
247static void _lookup_dco(struct clk_hw_omap *clk, u8 *dco, u16 m, u8 n)
248{
249 unsigned long fint, clkinp;
250
251 clkinp = clk_hw_get_rate(clk_hw_get_parent(&clk->hw));
252 fint = (clkinp / n) * m;
253
254 if (fint < 1000000000)
255 *dco = 2;
256 else
257 *dco = 4;
258}
259
260
261
262
263
264
265
266
267
268
269
270
271
/*
 * _lookup_sddiv - calculate the sigma-delta divider for a j-type DPLL
 * @clk: DPLL whose parent rate is used
 * @sd_div: output - computed sigma-delta divider
 * @m: DPLL multiplier
 * @n: DPLL divider
 *
 * Computes sd = ceil(clkinp * m / (250e6 * n)) with staged integer
 * divisions: clkinp is first scaled to 100 kHz units, and the two
 * remainders (mod1, mod2) are tracked so the final result rounds up
 * whenever either division truncated.  The staging also keeps the
 * intermediate products within unsigned long range.
 */
static void _lookup_sddiv(struct clk_hw_omap *clk, u8 *sd_div, u16 m, u8 n)
{
	unsigned long clkinp, sd;
	int mod1, mod2;

	clkinp = clk_hw_get_rate(clk_hw_get_parent(&clk->hw));

	/* Scale to 100 kHz units, then divide by 250 and 10 (i.e. 2500
	 * total) to express the divider relative to ~250 MHz. */
	clkinp /= 100000;
	mod1 = (clkinp * m) % (250 * n);
	sd = (clkinp * m) / (250 * n);
	mod2 = sd % 10;
	sd /= 10;

	/* Any truncation in either stage means we must round up */
	if (mod1 || mod2)
		sd++;
	*sd_div = sd;
}
293
294
295
296
297
298
299
300
/*
 * omap3_noncore_dpll_ssc_program - program spread-spectrum clocking
 * @clk: DPLL to configure
 *
 * Enables and programs the DPLL's spread-spectrum clocking (SSC) when
 * both a modulation frequency (ssc_modfreq) and a delta-M (ssc_deltam)
 * have been configured in the dpll_data; disables SSC otherwise.  The
 * enable/downspread bits live in the DPLL control register, the
 * modulation frequency divider in ssc_modfreq_reg (as mantissa *
 * 2^exponent) and the per-step M delta in ssc_deltam_reg.
 */
static void omap3_noncore_dpll_ssc_program(struct clk_hw_omap *clk)
{
	struct dpll_data *dd = clk->dpll_data;
	unsigned long ref_rate;
	u32 v, ctrl, mod_freq_divider, exponent, mantissa;
	u32 deltam_step, deltam_ceil;

	ctrl = ti_clk_ll_ops->clk_readl(&dd->control_reg);

	if (dd->ssc_modfreq && dd->ssc_deltam) {
		ctrl |= dd->ssc_enable_mask;

		if (dd->ssc_downspread)
			ctrl |= dd->ssc_downspread_mask;
		else
			ctrl &= ~dd->ssc_downspread_mask;

		/* Modulation frequency divider = fint / (4 * modfreq) */
		ref_rate = clk_hw_get_rate(dd->clk_ref);
		mod_freq_divider =
		    (ref_rate / dd->last_rounded_n) / (4 * dd->ssc_modfreq);
		/* NOTE(review): the ref_rate/70 limit presumably comes
		 * from the SSC hardware spec - confirm against TRM */
		if (dd->ssc_modfreq > (ref_rate / 70))
			pr_warn("clock: SSC modulation frequency of DPLL %s greater than %ld\n",
				__clk_get_name(clk->hw.clk), ref_rate / 70);

		/* Encode the divider as mantissa (<= 127) * 2^exponent
		 * (<= 7), clamping the mantissa if it still overflows */
		exponent = 0;
		mantissa = mod_freq_divider;
		while ((mantissa > 127) && (exponent < 7)) {
			exponent++;
			mantissa /= 2;
		}
		if (mantissa > 127)
			mantissa = 127;

		v = ti_clk_ll_ops->clk_readl(&dd->ssc_modfreq_reg);
		v &= ~(dd->ssc_modfreq_mant_mask | dd->ssc_modfreq_exp_mask);
		v |= mantissa << __ffs(dd->ssc_modfreq_mant_mask);
		v |= exponent << __ffs(dd->ssc_modfreq_exp_mask);
		ti_clk_ll_ops->clk_writel(v, &dd->ssc_modfreq_reg);

		/* Per-step M delta; ssc_deltam appears to be scaled by
		 * 1000 (the /10 then /100 below) - TODO confirm units.
		 * Downspread uses half the delta since modulation only
		 * goes below the nominal frequency. */
		deltam_step = dd->last_rounded_m * dd->ssc_deltam;
		deltam_step /= 10;
		if (dd->ssc_downspread)
			deltam_step /= 2;

		/* Shift into the integer field before the remaining
		 * divisions so the fractional bits are preserved */
		deltam_step <<= __ffs(dd->ssc_deltam_int_mask);
		deltam_step /= 100;
		deltam_step /= mod_freq_divider;
		if (deltam_step > 0xFFFFF)
			deltam_step = 0xFFFFF;

		/* Integer delta-M, rounded up if a fraction remains */
		deltam_ceil = (deltam_step & dd->ssc_deltam_int_mask) >>
		    __ffs(dd->ssc_deltam_int_mask);
		if (deltam_step & dd->ssc_deltam_frac_mask)
			deltam_ceil++;

		/* Warn if the modulated M would leave the 20..2045 range */
		if ((dd->ssc_downspread &&
		     ((dd->last_rounded_m - (2 * deltam_ceil)) < 20 ||
		      dd->last_rounded_m > 2045)) ||
		    ((dd->last_rounded_m - deltam_ceil) < 20 ||
		     (dd->last_rounded_m + deltam_ceil) > 2045))
			pr_warn("clock: SSC multiplier of DPLL %s is out of range\n",
				__clk_get_name(clk->hw.clk));

		v = ti_clk_ll_ops->clk_readl(&dd->ssc_deltam_reg);
		v &= ~(dd->ssc_deltam_int_mask | dd->ssc_deltam_frac_mask);
		v |= deltam_step << __ffs(dd->ssc_deltam_int_mask |
					  dd->ssc_deltam_frac_mask);
		ti_clk_ll_ops->clk_writel(v, &dd->ssc_deltam_reg);
	} else {
		ctrl &= ~dd->ssc_enable_mask;
	}

	ti_clk_ll_ops->clk_writel(ctrl, &dd->control_reg);
}
375
376
377
378
379
380
381
382
383
/*
 * omap3_noncore_dpll_program - set non-CORE DPLL M,N values directly
 * @clk: struct clk_hw_omap * of DPLL to set
 * @freqsel: FREQSEL value to set (only used when the SoC has FREQSEL)
 *
 * Program the DPLL with the last-rounded M, N values stored in its
 * dpll_data and lock it: bypass first, write FREQSEL/M/N (plus DCO,
 * sigma-delta divider, DCC, M4XEN/LPMODE and SSC where the masks are
 * present), then relock.  Always returns 0.
 */
static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel)
{
	struct dpll_data *dd = clk->dpll_data;
	u8 dco, sd_div, ai = 0;
	u32 v;
	bool errata_i810;

	/* Put the DPLL in bypass before touching its dividers */
	_omap3_noncore_dpll_bypass(clk);

	/*
	 * Set jitter correction.  The FREQSEL field only exists on SoCs
	 * that advertise TI_CLK_DPLL_HAS_FREQSEL.
	 */
	if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) {
		v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
		v &= ~dd->freqsel_mask;
		v |= freqsel << __ffs(dd->freqsel_mask);
		ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
	}

	/* Set DPLL multiplier, divider */
	v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);

	/* Handle Duty Cycle Correction: enable above dcc_rate */
	if (dd->dcc_mask) {
		if (dd->last_rounded_rate >= dd->dcc_rate)
			v |= dd->dcc_mask;
		else
			v &= ~dd->dcc_mask;
	}

	/* N is programmed as (divider - 1) in hardware */
	v &= ~(dd->mult_mask | dd->div1_mask);
	v |= dd->last_rounded_m << __ffs(dd->mult_mask);
	v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);

	/* Configure dco and sd_div for j-type DPLLs (masks present) */
	if (dd->dco_mask) {
		_lookup_dco(clk, &dco, dd->last_rounded_m, dd->last_rounded_n);
		v &= ~(dd->dco_mask);
		v |= dco << __ffs(dd->dco_mask);
	}
	if (dd->sddiv_mask) {
		_lookup_sddiv(clk, &sd_div, dd->last_rounded_m,
			      dd->last_rounded_n);
		v &= ~(dd->sddiv_mask);
		v |= sd_div << __ffs(dd->sddiv_mask);
	}

	/*
	 * Errata i810 - the DPLL controller can get stuck while
	 * transitioning to a power-saving state.  Prevent DPLL autoidle
	 * before reprogramming M/N so no low-power transition can race
	 * with the register write.
	 */
	errata_i810 = ti_clk_get_features()->flags & TI_CLK_ERRATA_I810;

	if (errata_i810) {
		ai = omap3_dpll_autoidle_read(clk);
		if (ai) {
			omap3_dpll_deny_idle(clk);

			/* Read back to ensure the write has landed
			 * (acts as an OCP barrier) */
			omap3_dpll_autoidle_read(clk);
		}
	}

	ti_clk_ll_ops->clk_writel(v, &dd->mult_div1_reg);

	/* Set 4X multiplier and low-power mode, where supported */
	if (dd->m4xen_mask || dd->lpmode_mask) {
		v = ti_clk_ll_ops->clk_readl(&dd->control_reg);

		if (dd->m4xen_mask) {
			if (dd->last_rounded_m4xen)
				v |= dd->m4xen_mask;
			else
				v &= ~dd->m4xen_mask;
		}

		if (dd->lpmode_mask) {
			if (dd->last_rounded_lpmode)
				v |= dd->lpmode_mask;
			else
				v &= ~dd->lpmode_mask;
		}

		ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
	}

	/* Program spread-spectrum clocking if this DPLL supports it */
	if (dd->ssc_enable_mask)
		omap3_noncore_dpll_ssc_program(clk);

	/* Relock; output dividers are handled by the clock framework */
	_omap3_noncore_dpll_lock(clk);

	/* Restore autoidle if we disabled it for errata i810 */
	if (errata_i810 && ai)
		omap3_dpll_allow_idle(clk);

	return 0;
}
489
490
491
492
493
494
495
496
497
498
/*
 * omap3_dpll_recalc - recalculate DPLL rate
 * @hw: struct clk_hw * of the DPLL
 * @parent_rate: unused; the rate is derived from the DPLL registers
 *
 * Recalculate and return the DPLL's current rate.
 */
unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate)
{
	return omap2_get_dpll_rate(to_clk_hw_omap(hw));
}
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
/**
 * omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode
 * @hw: struct clk_hw * of a DPLL
 *
 * Enables a non-CORE DPLL: first enables its clockdomain (if any),
 * then puts the DPLL into low-power bypass when its current rate equals
 * the bypass clock rate, or locks it otherwise.  Returns 0 on success
 * or a negative error code on failure.
 */
int omap3_noncore_dpll_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	int r;
	struct dpll_data *dd;
	struct clk_hw *parent;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (clk->clkdm) {
		r = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
		if (r) {
			WARN(1,
			     "%s: could not enable %s's clockdomain %s: %d\n",
			     __func__, clk_hw_get_name(hw),
			     clk->clkdm_name, r);
			return r;
		}
	}

	parent = clk_hw_get_parent(hw);

	/* Rate equal to the bypass clock rate implies bypass mode; the
	 * parent should already agree with the chosen mode */
	if (clk_hw_get_rate(hw) == clk_hw_get_rate(dd->clk_bypass)) {
		WARN_ON(parent != dd->clk_bypass);
		r = _omap3_noncore_dpll_bypass(clk);
	} else {
		WARN_ON(parent != dd->clk_ref);
		r = _omap3_noncore_dpll_lock(clk);
	}

	return r;
}
556
557
558
559
560
561
562
563
564void omap3_noncore_dpll_disable(struct clk_hw *hw)
565{
566 struct clk_hw_omap *clk = to_clk_hw_omap(hw);
567
568 _omap3_noncore_dpll_stop(clk);
569 if (clk->clkdm)
570 ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
571}
572
573
574
575
576
577
578
579
580
581
582
583
584
/**
 * omap3_noncore_dpll_determine_rate - determine rate for a DPLL
 * @hw: pointer to the clock to determine rate for
 * @req: target rate request
 *
 * Chooses the DPLL parent for the requested rate: the bypass clock if
 * the request matches its rate and the DPLL supports low-power bypass,
 * otherwise the reference clock with the rate rounded by
 * omap2_dpll_round_rate().  Returns 0 on success, -EINVAL on a zero
 * rate or missing DPLL data.
 */
int omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *dd;

	if (!req->rate)
		return -EINVAL;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (clk_hw_get_rate(dd->clk_bypass) == req->rate &&
	    (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
		req->best_parent_hw = dd->clk_bypass;
	} else {
		req->rate = omap2_dpll_round_rate(hw, req->rate,
						  &req->best_parent_rate);
		req->best_parent_hw = dd->clk_ref;
	}

	/* The chosen parent runs at the (possibly rounded) target rate */
	req->best_parent_rate = req->rate;

	return 0;
}
611
612
613
614
615
616
617
618
619
620int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index)
621{
622 struct clk_hw_omap *clk = to_clk_hw_omap(hw);
623 int ret;
624
625 if (!hw)
626 return -EINVAL;
627
628 if (index)
629 ret = _omap3_noncore_dpll_bypass(clk);
630 else
631 ret = _omap3_noncore_dpll_lock(clk);
632
633 return ret;
634}
635
636
637
638
639
640
641
642
643
644
645
646
/**
 * omap3_noncore_dpll_set_rate - set rate for a DPLL clock
 * @hw: pointer to the clock to set
 * @rate: target rate for the clock
 * @parent_rate: rate of the parent clock
 *
 * Programs the DPLL with the previously-rounded M/N values and locks
 * it at the target rate.  The rate must have been rounded beforehand
 * (last_rounded_rate == 0 is rejected) and the DPLL must currently be
 * parented to its reference clock.  Returns -EINVAL on invalid
 * arguments or state, otherwise the result of
 * omap3_noncore_dpll_program().
 */
int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *dd;
	u16 freqsel = 0;
	int ret;

	if (!hw || !rate)
		return -EINVAL;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	/* Rate changes are only valid when running from the ref clock */
	if (clk_hw_get_parent(hw) != dd->clk_ref)
		return -EINVAL;

	/* A rounding step must have populated the last_rounded_* fields */
	if (dd->last_rounded_rate == 0)
		return -EINVAL;

	/* Only OMAP3-class DPLLs have a FREQSEL (jitter correction) field */
	if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) {
		freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n);
		WARN_ON(!freqsel);
	}

	pr_debug("%s: %s: set rate: locking rate to %lu.\n", __func__,
		 clk_hw_get_name(hw), rate);

	ret = omap3_noncore_dpll_program(clk, freqsel);

	return ret;
}
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
696 unsigned long rate,
697 unsigned long parent_rate,
698 u8 index)
699{
700 int ret;
701
702 if (!hw || !rate)
703 return -EINVAL;
704
705
706
707
708
709
710 if (index)
711 ret = omap3_noncore_dpll_set_parent(hw, index);
712 else
713 ret = omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
714
715 return ret;
716}
717
718
719
720
721
722
723
724
725
726
727
/**
 * omap3_dpll_autoidle_read - read a DPLL's autoidle field
 * @clk: struct clk_hw_omap * of the DPLL to read
 *
 * Returns the DPLL's autoidle field value, shifted down to bit 0.
 * On a missing clk, dpll_data or autoidle mask, returns -EINVAL
 * implicitly converted to u32; note that callers in this file only
 * test the result for non-zero, so that error value reads as
 * "autoidle enabled" to them.
 */
static u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk)
{
	const struct dpll_data *dd;
	u32 v;

	if (!clk || !clk->dpll_data)
		return -EINVAL;

	dd = clk->dpll_data;

	if (!dd->autoidle_mask)
		return -EINVAL;

	v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg);
	v &= dd->autoidle_mask;
	v >>= __ffs(dd->autoidle_mask);

	return v;
}
747
748
749
750
751
752
753
754
755
756
/**
 * omap3_dpll_allow_idle - enable DPLL autoidle bits
 * @clk: struct clk_hw_omap * of the DPLL to operate on
 *
 * Enables DPLL automatic idle control.  Does nothing if the clock,
 * its dpll_data, or its autoidle mask is missing.
 */
static void omap3_dpll_allow_idle(struct clk_hw_omap *clk)
{
	const struct dpll_data *dd;
	u32 v;

	if (!clk || !clk->dpll_data)
		return;

	dd = clk->dpll_data;

	if (!dd->autoidle_mask)
		return;

	/*
	 * Only low-power stop autoidle is written here; it is the only
	 * autoidle mode this file defines (DPLL_AUTOIDLE_LOW_POWER_STOP).
	 * NOTE(review): the TRM may define additional autoidle modes -
	 * confirm before extending.
	 */
	v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg);
	v &= ~dd->autoidle_mask;
	v |= DPLL_AUTOIDLE_LOW_POWER_STOP << __ffs(dd->autoidle_mask);
	ti_clk_ll_ops->clk_writel(v, &dd->autoidle_reg);
}
780
781
782
783
784
785
786
787static void omap3_dpll_deny_idle(struct clk_hw_omap *clk)
788{
789 const struct dpll_data *dd;
790 u32 v;
791
792 if (!clk || !clk->dpll_data)
793 return;
794
795 dd = clk->dpll_data;
796
797 if (!dd->autoidle_mask)
798 return;
799
800 v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg);
801 v &= ~dd->autoidle_mask;
802 v |= DPLL_AUTOIDLE_DISABLE << __ffs(dd->autoidle_mask);
803 ti_clk_ll_ops->clk_writel(v, &dd->autoidle_reg);
804}
805
806
807
808
/* Find the parent DPLL for the given clkoutx2 clock */
static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw)
{
	struct clk_hw_omap *pclk = NULL;

	/* Walk up the parent chain until a clk_hw_omap with dpll_data
	 * is found, skipping any non-omap clk_hw nodes on the way */
	do {
		do {
			hw = clk_hw_get_parent(hw);
		} while (hw && (!omap2_clk_is_hw_omap(hw)));
		if (!hw)
			break;
		pclk = to_clk_hw_omap(hw);
	} while (pclk && !pclk->dpll_data);

	/* clk does not have a DPLL as a parent?  error in the clock data */
	if (!pclk) {
		WARN_ON(1);
		return NULL;
	}

	return pclk;
}
831
832
833
834
835
836
837
838
839
/**
 * omap3_clkoutx2_recalc - recalculate a DPLL's CLKOUTX2 rate
 * @hw: pointer to the clkoutx2 clock
 * @parent_rate: rate of the parent DPLL output
 *
 * Returns 2x the parent rate when the parent DPLL is locked and is not
 * a J-type DPLL; otherwise returns the parent rate unchanged.  Returns
 * 0 if the parent rate is 0 or no parent DPLL can be found.
 */
unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
				    unsigned long parent_rate)
{
	const struct dpll_data *dd;
	unsigned long rate;
	u32 v;
	struct clk_hw_omap *pclk = NULL;

	if (!parent_rate)
		return 0;

	pclk = omap3_find_clkoutx2_dpll(hw);

	if (!pclk)
		return 0;

	dd = pclk->dpll_data;

	WARN_ON(!dd->enable_mask);

	/* Read the DPLL's EN mode to decide whether it is locked */
	v = ti_clk_ll_ops->clk_readl(&dd->control_reg) & dd->enable_mask;
	v >>= __ffs(dd->enable_mask);
	if ((v != OMAP3XXX_EN_DPLL_LOCKED) || (dd->flags & DPLL_J_TYPE))
		rate = parent_rate;
	else
		rate = parent_rate * 2;
	return rate;
}
868
869
870
871
872
873
874
875
876int omap3_core_dpll_save_context(struct clk_hw *hw)
877{
878 struct clk_hw_omap *clk = to_clk_hw_omap(hw);
879 struct dpll_data *dd;
880 u32 v;
881
882 dd = clk->dpll_data;
883
884 v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
885 clk->context = (v & dd->enable_mask) >> __ffs(dd->enable_mask);
886
887 if (clk->context == DPLL_LOCKED) {
888 v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
889 dd->last_rounded_m = (v & dd->mult_mask) >>
890 __ffs(dd->mult_mask);
891 dd->last_rounded_n = ((v & dd->div1_mask) >>
892 __ffs(dd->div1_mask)) + 1;
893 }
894
895 return 0;
896}
897
898
899
900
901
902
903
904
/**
 * omap3_core_dpll_restore_context - restore the CORE DPLL after context loss
 * @hw: pointer to the clock to restore
 *
 * If the DPLL was saved in the locked state, it is first placed in
 * bypass (EN mode 0x4 - assumed to be a bypass mode for this DPLL;
 * TODO confirm the exact mode name against the TRM), the saved M/N
 * values are reprogrammed, and the DPLL is relocked.  Otherwise the
 * saved EN mode is simply rewritten.
 */
void omap3_core_dpll_restore_context(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	const struct dpll_data *dd;
	u32 v;

	dd = clk->dpll_data;

	if (clk->context == DPLL_LOCKED) {
		/* Bypass before touching M/N, and wait for it */
		_omap3_dpll_write_clken(clk, 0x4);
		_omap3_wait_dpll_status(clk, 0);

		/* N is programmed as (divider - 1) in hardware */
		v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
		v &= ~(dd->mult_mask | dd->div1_mask);
		v |= dd->last_rounded_m << __ffs(dd->mult_mask);
		v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);
		ti_clk_ll_ops->clk_writel(v, &dd->mult_div1_reg);

		_omap3_dpll_write_clken(clk, DPLL_LOCKED);
		_omap3_wait_dpll_status(clk, 1);
	} else {
		_omap3_dpll_write_clken(clk, clk->context);
	}
}
929
930
931
932
933
934
935
936
/**
 * omap3_noncore_dpll_save_context - save non-CORE DPLL state
 * @hw: pointer to the clock to save context for
 *
 * Records the current EN_DPLL mode in clk->context and, when the DPLL
 * is locked, caches the hardware M and N divider settings in its
 * dpll_data so omap3_noncore_dpll_restore_context() can reprogram
 * them.  Always returns 0.
 */
int omap3_noncore_dpll_save_context(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *dd;
	u32 v;

	dd = clk->dpll_data;

	v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
	clk->context = (v & dd->enable_mask) >> __ffs(dd->enable_mask);

	if (clk->context == DPLL_LOCKED) {
		/* N is stored in hardware as (divider - 1) */
		v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
		dd->last_rounded_m = (v & dd->mult_mask) >>
				     __ffs(dd->mult_mask);
		dd->last_rounded_n = ((v & dd->div1_mask) >>
				      __ffs(dd->div1_mask)) + 1;
	}

	return 0;
}
958
959
960
961
962
963
964
965
/**
 * omap3_noncore_dpll_restore_context - restore a DPLL after context loss
 * @hw: pointer to the clock to restore
 *
 * Compares the saved EN mode and M/N values against what the hardware
 * currently holds; if they already match, nothing is done.  Otherwise
 * the DPLL is fully reprogrammed and relocked (when it was saved in
 * the locked state) or simply switched back to the saved EN mode.
 */
void omap3_noncore_dpll_restore_context(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	const struct dpll_data *dd;
	u32 ctrl, mult_div1;

	dd = clk->dpll_data;

	ctrl = ti_clk_ll_ops->clk_readl(&dd->control_reg);
	mult_div1 = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);

	/* Skip the restore if the hardware already matches the saved
	 * EN mode, M and N (N compared as register value + 1) */
	if (clk->context == ((ctrl & dd->enable_mask) >>
			     __ffs(dd->enable_mask)) &&
	    dd->last_rounded_m == ((mult_div1 & dd->mult_mask) >>
				   __ffs(dd->mult_mask)) &&
	    dd->last_rounded_n == ((mult_div1 & dd->div1_mask) >>
				   __ffs(dd->div1_mask)) + 1) {
		return;
	}

	if (clk->context == DPLL_LOCKED)
		omap3_noncore_dpll_program(clk, 0);
	else
		_omap3_dpll_write_clken(clk, clk->context);
}
992
993
/* OMAP3-style DPLL hwops: hook up autoidle allow/deny control */
const struct clk_hw_omap_ops clkhwops_omap3_dpll = {
	.allow_idle = omap3_dpll_allow_idle,
	.deny_idle = omap3_dpll_deny_idle,
};
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
/**
 * omap3_dpll4_set_rate - set rate for OMAP3 per-DPLL (DPLL4)
 * @hw: clock to change
 * @rate: target rate for the clock
 * @parent_rate: rate of the parent clock
 *
 * Rejects the rate change on SoCs that set
 * TI_CLK_DPLL4_DENY_REPROGRAM (3430ES1 silicon 'Limitation 2.5');
 * otherwise performs the normal non-CORE DPLL rate change.
 */
int omap3_dpll4_set_rate(struct clk_hw *hw, unsigned long rate,
			 unsigned long parent_rate)
{
	if (ti_clk_get_features()->flags & TI_CLK_DPLL4_DENY_REPROGRAM) {
		pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n");
		return -EINVAL;
	}

	return omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
}
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
/**
 * omap3_dpll4_set_rate_and_parent - set rate and parent for OMAP3 DPLL4
 * @hw: clock to change
 * @rate: target rate for the clock
 * @parent_rate: rate of the parent clock
 * @index: parent index to select
 *
 * Same restriction as omap3_dpll4_set_rate(): refuses to reprogram on
 * SoCs with TI_CLK_DPLL4_DENY_REPROGRAM, otherwise delegates to the
 * generic non-CORE set_rate_and_parent.
 */
int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate, u8 index)
{
	if (ti_clk_get_features()->flags & TI_CLK_DPLL4_DENY_REPROGRAM) {
		pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n");
		return -EINVAL;
	}

	return omap3_noncore_dpll_set_rate_and_parent(hw, rate, parent_rate,
						      index);
}
1049
1050
/*
 * omap3_dpll5_apply_errata - apply the DPLL5 USBHOST errata workaround
 * @hw: DPLL5 clock
 * @parent_rate: current rate of the DPLL's reference clock
 *
 * Looks up a precomputed M/N pair for the given reference rate and
 * programs the DPLL with it directly, bypassing the generic rounding.
 * NOTE(review): the table presumably implements the OMAP36xx DPLL5
 * errata dividers for the USBHOST rate - confirm against the silicon
 * errata document.  Returns true if a precomputed setting was applied,
 * false if the reference rate has no table entry.
 */
static bool omap3_dpll5_apply_errata(struct clk_hw *hw,
				     unsigned long parent_rate)
{
	struct omap3_dpll5_settings {
		unsigned int rate, m, n;
	};

	/* n is stored as (register value + 1), i.e. the real divider */
	static const struct omap3_dpll5_settings precomputed[] = {
		{ 12000000, 80, 0 + 1 },
		{ 13000000, 443, 5 + 1 },
		{ 19200000, 50, 0 + 1 },
		{ 26000000, 443, 11 + 1 },
		{ 38400000, 25, 0 + 1 }
	};

	const struct omap3_dpll5_settings *d;
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *dd;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(precomputed); ++i) {
		if (parent_rate == precomputed[i].rate)
			break;
	}

	if (i == ARRAY_SIZE(precomputed))
		return false;

	d = &precomputed[i];

	/* Update the dpll_data as if the rate had been rounded, then
	 * program the DPLL with the fixed dividers */
	dd = clk->dpll_data;
	dd->last_rounded_m = d->m;
	dd->last_rounded_n = d->n;
	dd->last_rounded_rate = div_u64((u64)parent_rate * d->m, d->n);
	omap3_noncore_dpll_program(clk, 0);

	return true;
}
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106int omap3_dpll5_set_rate(struct clk_hw *hw, unsigned long rate,
1107 unsigned long parent_rate)
1108{
1109 if (rate == OMAP3_DPLL5_FREQ_FOR_USBHOST * 8) {
1110 if (omap3_dpll5_apply_errata(hw, parent_rate))
1111 return 0;
1112 }
1113
1114 return omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
1115}
1116