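/*
 * OMAP1 clock framework: clock gating, divider/rate recalculation,
 * CKCTL/DPLL reprogramming helpers and the clk API implementation for
 * OMAP1 SoCs.
 */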
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>

#include <asm/mach-types.h>

#include <mach/hardware.h>

#include "soc.h"
#include "iomap.h"
#include "clock.h"
#include "opp.h"
#include "sram.h"

__u32 arm_idlect1_mask;
struct clk *api_ck_p, *ck_dpll1_p, *ck_ref_p;

static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);
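
/*
 * The UART functional clock reports 48 MHz when the clock-select bit in
 * its enable register is set, and 12 MHz otherwise.
 */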
unsigned long omap1_uart_recalc(struct clk *clk)
{
	unsigned int val = __raw_readl(clk->enable_reg);

	return val & clk->enable_bit ? 48000000 : 12000000;
}
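
/*
 * The SoSSI rate is the parent rate divided by the 3-bit divider field at
 * bits 17..19 of MOD_CONF_CTRL_1 (field value + 1).
 */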
unsigned long omap1_sossi_recalc(struct clk *clk)
{
	u32 div = omap_readl(MOD_CONF_CTRL_1);

	div = (div >> 17) & 0x7;
	div++;

	return clk->parent->rate / div;
}
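
/*
 * ARM_IDLECT1 idle control: a clock flagged CLOCK_IDLE_CONTROL keeps its
 * bit out of arm_idlect1_mask for as long as at least one user has denied
 * idle for it.
 */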
static void omap1_clk_allow_idle(struct clk *clk)
{
	struct arm_idlect1_clk *iclk = (struct arm_idlect1_clk *)clk;

	if (!(clk->flags & CLOCK_IDLE_CONTROL))
		return;

	if (iclk->no_idle_count > 0 && !(--iclk->no_idle_count))
		arm_idlect1_mask |= 1 << iclk->idlect_shift;
}

static void omap1_clk_deny_idle(struct clk *clk)
{
	struct arm_idlect1_clk *iclk = (struct arm_idlect1_clk *)clk;

	if (!(clk->flags & CLOCK_IDLE_CONTROL))
		return;

	if (iclk->no_idle_count++ == 0)
		arm_idlect1_mask &= ~(1 << iclk->idlect_shift);
}
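
/*
 * Clamp a proposed ARM_CKCTL value so the divider exponents respect the
 * constraints enforced below: DSPMMU_CK must be DSP_CK or DSP_CK/2, TC_CK
 * may not be faster than ARM_CK or DSPMMU_CK, and LCD_CK and ARMPER_CK may
 * not be faster than TC_CK.
 */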
static __u16 verify_ckctl_value(__u16 newval)
{
	__u8 per_exp;
	__u8 lcd_exp;
	__u8 arm_exp;
	__u8 dsp_exp;
	__u8 tc_exp;
	__u8 dspmmu_exp;

	per_exp = (newval >> CKCTL_PERDIV_OFFSET) & 3;
	lcd_exp = (newval >> CKCTL_LCDDIV_OFFSET) & 3;
	arm_exp = (newval >> CKCTL_ARMDIV_OFFSET) & 3;
	dsp_exp = (newval >> CKCTL_DSPDIV_OFFSET) & 3;
	tc_exp = (newval >> CKCTL_TCDIV_OFFSET) & 3;
	dspmmu_exp = (newval >> CKCTL_DSPMMUDIV_OFFSET) & 3;

	if (dspmmu_exp < dsp_exp)
		dspmmu_exp = dsp_exp;
	if (dspmmu_exp > dsp_exp + 1)
		dspmmu_exp = dsp_exp + 1;
	if (tc_exp < arm_exp)
		tc_exp = arm_exp;
	if (tc_exp < dspmmu_exp)
		tc_exp = dspmmu_exp;
	if (tc_exp > lcd_exp)
		lcd_exp = tc_exp;
	if (tc_exp > per_exp)
		per_exp = tc_exp;

	newval &= 0xf000;
	newval |= per_exp << CKCTL_PERDIV_OFFSET;
	newval |= lcd_exp << CKCTL_LCDDIV_OFFSET;
	newval |= arm_exp << CKCTL_ARMDIV_OFFSET;
	newval |= dsp_exp << CKCTL_DSPDIV_OFFSET;
	newval |= tc_exp << CKCTL_TCDIV_OFFSET;
	newval |= dspmmu_exp << CKCTL_DSPMMUDIV_OFFSET;

	return newval;
}
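
/*
 * Return the smallest divider exponent (0..3) that brings the parent rate
 * down to the requested rate or below; a result of 4 means even the
 * largest divider (8) is not enough, and callers either reject it or clamp
 * it to 3.
 */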
static int calc_dsor_exp(struct clk *clk, unsigned long rate)
{
	unsigned long realrate;
	struct clk *parent;
	unsigned dsor_exp;

	parent = clk->parent;
	if (unlikely(parent == NULL))
		return -EIO;

	realrate = parent->rate;
	for (dsor_exp = 0; dsor_exp < 4; dsor_exp++) {
		if (realrate <= rate)
			break;

		realrate /= 2;
	}

	return dsor_exp;
}

unsigned long omap1_ckctl_recalc(struct clk *clk)
{
	int dsor = 1 << (3 & (omap_readw(ARM_CKCTL) >> clk->rate_offset));

	return clk->parent->rate / dsor;
}
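
/*
 * DSP_CKCTL can only be read while api_ck is running, so it is enabled
 * briefly around the register access.
 */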
unsigned long omap1_ckctl_recalc_dsp_domain(struct clk *clk)
{
	int dsor;

	omap1_clk_enable(api_ck_p);
	dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset));
	omap1_clk_disable(api_ck_p);

	return clk->parent->rate / dsor;
}
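
/*
 * Switch the MPU to the closest omap1_rate_table entry at or below the
 * requested rate, reprogramming the DPLL and CKCTL dividers from SRAM and
 * updating the cached ck_dpll1 rate.
 */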
int omap1_select_table_rate(struct clk *clk, unsigned long rate)
{
	struct mpu_rate *ptr;
	unsigned long ref_rate;

	ref_rate = ck_ref_p->rate;

	for (ptr = omap1_rate_table; ptr->rate; ptr++) {
		if (!(ptr->flags & cpu_mask))
			continue;

		if (ptr->xtal != ref_rate)
			continue;

		if (ptr->rate <= rate)
			break;
	}

	if (!ptr->rate)
		return -EINVAL;

	omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val);

	ck_dpll1_p->rate = ptr->pll_rate;

	return 0;
}

int omap1_clk_set_rate_dsp_domain(struct clk *clk, unsigned long rate)
{
	int dsor_exp;
	u16 regval;

	dsor_exp = calc_dsor_exp(clk, rate);
	if (dsor_exp > 3)
		dsor_exp = -EINVAL;
	if (dsor_exp < 0)
		return dsor_exp;

	regval = __raw_readw(DSP_CKCTL);
	regval &= ~(3 << clk->rate_offset);
	regval |= dsor_exp << clk->rate_offset;
	__raw_writew(regval, DSP_CKCTL);
	clk->rate = clk->parent->rate / (1 << dsor_exp);

	return 0;
}

long omap1_clk_round_rate_ckctl_arm(struct clk *clk, unsigned long rate)
{
	int dsor_exp = calc_dsor_exp(clk, rate);

	if (dsor_exp < 0)
		return dsor_exp;
	if (dsor_exp > 3)
		dsor_exp = 3;
	return clk->parent->rate / (1 << dsor_exp);
}

int omap1_clk_set_rate_ckctl_arm(struct clk *clk, unsigned long rate)
{
	int dsor_exp;
	u16 regval;

	dsor_exp = calc_dsor_exp(clk, rate);
	if (dsor_exp > 3)
		dsor_exp = -EINVAL;
	if (dsor_exp < 0)
		return dsor_exp;

	regval = omap_readw(ARM_CKCTL);
	regval &= ~(3 << clk->rate_offset);
	regval |= dsor_exp << clk->rate_offset;
	regval = verify_ckctl_value(regval);
	omap_writew(regval, ARM_CKCTL);
	clk->rate = clk->parent->rate / (1 << dsor_exp);
	return 0;
}

long omap1_round_to_table_rate(struct clk *clk, unsigned long rate)
{
	struct mpu_rate *ptr;
	long highest_rate;
	unsigned long ref_rate;

	ref_rate = ck_ref_p->rate;

	highest_rate = -EINVAL;

	for (ptr = omap1_rate_table; ptr->rate; ptr++) {
		if (!(ptr->flags & cpu_mask))
			continue;

		if (ptr->xtal != ref_rate)
			continue;

		highest_rate = ptr->rate;

		if (ptr->rate <= rate)
			break;
	}

	return highest_rate;
}
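
/*
 * External MCLK/BCLK divisors: the 96 MHz source may be divided by 2..8 or
 * by even values up to 96. Return the smallest valid divisor whose output
 * rate does not exceed the requested rate.
 */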
static unsigned calc_ext_dsor(unsigned long rate)
{
	unsigned dsor;

	for (dsor = 2; dsor < 96; ++dsor) {
		if ((dsor & 1) && dsor > 8)
			continue;
		if (rate >= 96000000 / dsor)
			break;
	}
	return dsor;
}
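
/* Only 12 MHz and 48 MHz are valid UART functional clock rates. */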
int omap1_set_uart_rate(struct clk *clk, unsigned long rate)
{
	unsigned int val;

	val = __raw_readl(clk->enable_reg);
	if (rate == 12000000)
		val &= ~(1 << clk->enable_bit);
	else if (rate == 48000000)
		val |= (1 << clk->enable_bit);
	else
		return -EINVAL;
	__raw_writel(val, clk->enable_reg);
	clk->rate = rate;

	return 0;
}
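
/*
 * Program an external clock divisor into the ratio field of the enable
 * register; divisors above 8 use the second encoding ((dsor - 8) / 2 + 6).
 */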
int omap1_set_ext_clk_rate(struct clk *clk, unsigned long rate)
{
	unsigned dsor;
	__u16 ratio_bits;

	dsor = calc_ext_dsor(rate);
	clk->rate = 96000000 / dsor;
	if (dsor > 8)
		ratio_bits = ((dsor - 8) / 2 + 6) << 2;
	else
		ratio_bits = (dsor - 2) << 2;

	ratio_bits |= __raw_readw(clk->enable_reg) & ~0xfd;
	__raw_writew(ratio_bits, clk->enable_reg);

	return 0;
}

int omap1_set_sossi_rate(struct clk *clk, unsigned long rate)
{
	u32 l;
	int div;
	unsigned long p_rate;

	p_rate = clk->parent->rate;

	div = (p_rate + rate - 1) / rate;
	div--;
	if (div < 0 || div > 7)
		return -EINVAL;

	l = omap_readl(MOD_CONF_CTRL_1);
	l &= ~(7 << 17);
	l |= div << 17;
	omap_writel(l, MOD_CONF_CTRL_1);

	clk->rate = p_rate / (div + 1);

	return 0;
}

long omap1_round_ext_clk_rate(struct clk *clk, unsigned long rate)
{
	return 96000000 / calc_ext_dsor(rate);
}

void omap1_init_ext_clk(struct clk *clk)
{
	unsigned dsor;
	__u16 ratio_bits;

	ratio_bits = __raw_readw(clk->enable_reg) & ~1;
	__raw_writew(ratio_bits, clk->enable_reg);

	ratio_bits = (ratio_bits & 0xfc) >> 2;
	if (ratio_bits > 6)
		dsor = (ratio_bits - 6) * 2 + 8;
	else
		dsor = ratio_bits + 2;

	clk->rate = 96000000 / dsor;
}
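
/*
 * Reference-counted enable/disable. Enabling a clock first enables its
 * parent, and clocks flagged CLOCK_NO_IDLE_PARENT also deny idle for the
 * parent while they are in use.
 */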
int omap1_clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = omap1_clk_enable(clk->parent);
			if (ret)
				goto err;

			if (clk->flags & CLOCK_NO_IDLE_PARENT)
				omap1_clk_deny_idle(clk->parent);
		}

		ret = clk->ops->enable(clk);
		if (ret) {
			if (clk->parent)
				omap1_clk_disable(clk->parent);
			goto err;
		}
	}
	return ret;

err:
	clk->usecount--;
	return ret;
}

void omap1_clk_disable(struct clk *clk)
{
	if (clk->usecount > 0 && !(--clk->usecount)) {
		clk->ops->disable(clk);
		if (likely(clk->parent)) {
			omap1_clk_disable(clk->parent);
			if (clk->flags & CLOCK_NO_IDLE_PARENT)
				omap1_clk_allow_idle(clk->parent);
		}
	}
}

static int omap1_clk_enable_generic(struct clk *clk)
{
	__u16 regval16;
	__u32 regval32;

	if (unlikely(clk->enable_reg == NULL)) {
		printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
			clk->name);
		return -EINVAL;
	}

	if (clk->flags & ENABLE_REG_32BIT) {
		regval32 = __raw_readl(clk->enable_reg);
		regval32 |= (1 << clk->enable_bit);
		__raw_writel(regval32, clk->enable_reg);
	} else {
		regval16 = __raw_readw(clk->enable_reg);
		regval16 |= (1 << clk->enable_bit);
		__raw_writew(regval16, clk->enable_reg);
	}

	return 0;
}

static void omap1_clk_disable_generic(struct clk *clk)
{
	__u16 regval16;
	__u32 regval32;

	if (clk->enable_reg == NULL)
		return;

	if (clk->flags & ENABLE_REG_32BIT) {
		regval32 = __raw_readl(clk->enable_reg);
		regval32 &= ~(1 << clk->enable_bit);
		__raw_writel(regval32, clk->enable_reg);
	} else {
		regval16 = __raw_readw(clk->enable_reg);
		regval16 &= ~(1 << clk->enable_bit);
		__raw_writew(regval16, clk->enable_reg);
	}
}

const struct clkops clkops_generic = {
	.enable = omap1_clk_enable_generic,
	.disable = omap1_clk_disable_generic,
};
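
/*
 * DSP-domain clocks: api_ck must be running while their enable registers
 * are accessed, so it is enabled around each generic enable/disable.
 */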
static int omap1_clk_enable_dsp_domain(struct clk *clk)
{
	int retval;

	retval = omap1_clk_enable(api_ck_p);
	if (!retval) {
		retval = omap1_clk_enable_generic(clk);
		omap1_clk_disable(api_ck_p);
	}

	return retval;
}

static void omap1_clk_disable_dsp_domain(struct clk *clk)
{
	if (omap1_clk_enable(api_ck_p) == 0) {
		omap1_clk_disable_generic(clk);
		omap1_clk_disable(api_ck_p);
	}
}

const struct clkops clkops_dspck = {
	.enable = omap1_clk_enable_dsp_domain,
	.disable = omap1_clk_disable_dsp_domain,
};
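
/*
 * 16xx UART functional clocks additionally adjust the idle-mode bits at
 * the UART's sysc_addr register when the clock is switched on or off.
 */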
static int omap1_clk_enable_uart_functional_16xx(struct clk *clk)
{
	int ret;
	struct uart_clk *uclk;

	ret = omap1_clk_enable_generic(clk);
	if (ret == 0) {
		uclk = (struct uart_clk *)clk;
		omap_writeb((omap_readb(uclk->sysc_addr) & ~0x10) | 8,
			uclk->sysc_addr);
	}

	return ret;
}

static void omap1_clk_disable_uart_functional_16xx(struct clk *clk)
{
	struct uart_clk *uclk;

	uclk = (struct uart_clk *)clk;
	omap_writeb((omap_readb(uclk->sysc_addr) & ~0x18), uclk->sysc_addr);

	omap1_clk_disable_generic(clk);
}

const struct clkops clkops_uart_16xx = {
	.enable = omap1_clk_enable_uart_functional_16xx,
	.disable = omap1_clk_disable_uart_functional_16xx,
};

long omap1_clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (clk->round_rate != NULL)
		return clk->round_rate(clk, rate);

	return clk->rate;
}

int omap1_clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret = -EINVAL;

	if (clk->set_rate)
		ret = clk->set_rate(clk, rate);
	return ret;
}

#ifdef CONFIG_OMAP_RESET_CLOCKS
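
/*
 * Disable a clock that is still enabled in hardware but has no users.
 * Clocks controlled through DSP_IDLECT2 are skipped.
 */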
void omap1_clk_disable_unused(struct clk *clk)
{
	__u32 regval32;

	if (clk->enable_reg == DSP_IDLECT2) {
		pr_info("Skipping reset check for DSP domain clock \"%s\"\n",
			clk->name);
		return;
	}

	if (clk->flags & ENABLE_REG_32BIT)
		regval32 = __raw_readl(clk->enable_reg);
	else
		regval32 = __raw_readw(clk->enable_reg);

	if ((regval32 & (1 << clk->enable_bit)) == 0)
		return;

	printk(KERN_INFO "Disabling unused clock \"%s\"... ", clk->name);
	clk->ops->disable(clk);
	printk(" done\n");
}

#endif
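
/*
 * Generic clk API (clk_enable/clk_disable/clk_get_rate/...), serialized by
 * clockfw_lock.
 */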
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap1_clk_enable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (clk->usecount == 0) {
		pr_err("Trying to disable clock %s with 0 usecount\n",
			clk->name);
		WARN_ON(1);
		goto out;
	}

	omap1_clk_disable(clk);

out:
	spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);

unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long flags;
	unsigned long ret;

	if (clk == NULL || IS_ERR(clk))
		return 0;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = clk->rate;
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_get_rate);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	long ret;

	if (clk == NULL || IS_ERR(clk))
		return 0;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap1_clk_round_rate(clk, rate);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_round_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (clk == NULL || IS_ERR(clk))
		return ret;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap1_clk_set_rate(clk, rate);
	if (ret == 0)
		propagate_rate(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_set_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	WARN_ONCE(1, "clk_set_parent() not implemented for OMAP1\n");

	return -EINVAL;
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL(clk_get_parent);
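
/*
 * mpurate= command line option: request an MPU rate override. Values
 * below 1000 are interpreted as MHz.
 */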
int __initdata mpurate;

static int __init omap_clk_setup(char *str)
{
	get_option(&str, &mpurate);

	if (!mpurate)
		return 1;

	if (mpurate < 1000)
		mpurate *= 1000000;

	return 1;
}
__setup("mpurate=", omap_clk_setup);
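
/* Used for clocks that always run at the same rate as their parent. */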
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent->rate;
}
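
/* Parent rate divided by the fixed divisor in clk->fixed_div. */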
unsigned long omap_fixed_divisor_recalc(struct clk *clk)
{
	WARN_ON(!clk->fixed_div);

	return clk->parent->rate / clk->fixed_div;
}

void clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;
}
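
/* Recalculate and propagate new rates to all children of tclk, recursively. */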
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->recalc)
			clkp->rate = clkp->recalc(clkp);
		propagate_rate(clkp);
	}
}

static LIST_HEAD(root_clks);
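
/*
 * Recalculate all root clocks (those without a parent) and propagate the
 * new rates down the tree.
 */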
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->recalc)
			clkp->rate = clkp->recalc(clkp);
		propagate_rate(clkp);
	}
}
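
/* Initialize the list heads that must be valid before clk_register(). */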
void clk_preinit(struct clk *clk)
{
	INIT_LIST_HEAD(&clk->children);
}

int clk_register(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clocks_mutex);
	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clocks);
	if (clk->init)
		clk->init(clk);
	mutex_unlock(&clocks_mutex);

	return 0;
}
EXPORT_SYMBOL(clk_register);

void clk_unregister(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return;

	mutex_lock(&clocks_mutex);
	list_del(&clk->sibling);
	list_del(&clk->node);
	mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);

void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clocks, node)
		if (clkp->flags & ENABLE_ON_INIT)
			clk_enable(clkp);
}
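
/* Look up a registered clock by name; returns NULL if it is not found. */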
struct clk *omap_clk_get_by_name(const char *name)
{
	struct clk *c;
	struct clk *ret = NULL;

	mutex_lock(&clocks_mutex);

	list_for_each_entry(c, &clocks, node) {
		if (!strcmp(c->name, name)) {
			ret = c;
			break;
		}
	}

	mutex_unlock(&clocks_mutex);

	return ret;
}

int omap_clk_enable_autoidle_all(void)
{
	struct clk *c;
	unsigned long flags;

	spin_lock_irqsave(&clockfw_lock, flags);

	list_for_each_entry(c, &clocks, node)
		if (c->ops->allow_idle)
			c->ops->allow_idle(c);

	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}

int omap_clk_disable_autoidle_all(void)
{
	struct clk *c;
	unsigned long flags;

	spin_lock_irqsave(&clockfw_lock, flags);

	list_for_each_entry(c, &clocks, node)
		if (c->ops->deny_idle)
			c->ops->deny_idle(c);

	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
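
/* No-op clock operations, for clocks without gating control of their own. */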
static int clkll_enable_null(struct clk *clk)
{
	return 0;
}

static void clkll_disable_null(struct clk *clk)
{
}

const struct clkops clkops_null = {
	.enable = clkll_enable_null,
	.disable = clkll_disable_null,
};
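
/* Dummy placeholder clock, backed by the no-op clkops. */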
struct clk dummy_ck = {
	.name = "dummy",
	.ops = &clkops_null,
};
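
/*
 * Late initcalls: gate any clock that is still enabled in hardware but has
 * no users, then allow the remaining clocks to autoidle.
 */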
#ifdef CONFIG_OMAP_RESET_CLOCKS

static int __init clk_disable_unused(void)
{
	struct clk *ck;
	unsigned long flags;

	pr_info("clock: disabling unused clocks to save power\n");

	spin_lock_irqsave(&clockfw_lock, flags);
	list_for_each_entry(ck, &clocks, node) {
		if (ck->ops == &clkops_null)
			continue;

		if (ck->usecount > 0 || !ck->enable_reg)
			continue;

		omap1_clk_disable_unused(ck);
	}
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
late_initcall(clk_disable_unused);
late_initcall(omap_clk_enable_autoidle_all);
#endif
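
/*
 * debugfs support: a per-clock directory exposing usecount, rate and
 * flags, plus a "summary" file listing all registered clocks.
 */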
#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *clk_debugfs_root;

static int clk_dbg_show_summary(struct seq_file *s, void *unused)
{
	struct clk *c;
	struct clk *pa;

	mutex_lock(&clocks_mutex);
	seq_printf(s, "%-30s %-30s %-10s %s\n",
		"clock-name", "parent-name", "rate", "use-count");

	list_for_each_entry(c, &clocks, node) {
		pa = c->parent;
		seq_printf(s, "%-30s %-30s %-10lu %d\n",
			c->name, pa ? pa->name : "none", c->rate,
			c->usecount);
	}
	mutex_unlock(&clocks_mutex);

	return 0;
}

static int clk_dbg_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_dbg_show_summary, inode->i_private);
}

static const struct file_operations debug_clock_fops = {
	.open = clk_dbg_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int clk_debugfs_register_one(struct clk *c)
{
	int err;
	struct dentry *d;
	struct clk *pa = c->parent;

	d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dent = d;

	d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	return 0;

err_out:
	debugfs_remove_recursive(c->dent);
	return err;
}

static int clk_debugfs_register(struct clk *c)
{
	int err;
	struct clk *pa = c->parent;

	if (pa && !pa->dent) {
		err = clk_debugfs_register(pa);
		if (err)
			return err;
	}

	if (!c->dent) {
		err = clk_debugfs_register_one(c);
		if (err)
			return err;
	}
	return 0;
}

static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	list_for_each_entry(c, &clocks, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}

	d = debugfs_create_file("summary", S_IRUGO,
		d, NULL, &debug_clock_fops);
	if (!d)
		return -ENOMEM;

	return 0;
err_out:
	debugfs_remove_recursive(clk_debugfs_root);
	return err;
}
late_initcall(clk_debugfs_init);

#endif