/*
 * OMAP1 clock functions
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>

#include <asm/mach-types.h>

#include <mach/hardware.h>

#include "soc.h"
#include "iomap.h"
#include "clock.h"
#include "opp.h"
#include "sram.h"

__u32 arm_idlect1_mask;
struct clk *api_ck_p, *ck_dpll1_p, *ck_ref_p;

static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);

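/*
 * OMAP1-specific clock functions
 */
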
unsigned long omap1_uart_recalc(struct clk *clk)
{
	unsigned int val = __raw_readl(clk->enable_reg);

	/* The enable bit selects 48 MHz; otherwise the 12 MHz clock is used */
	return val & (1 << clk->enable_bit) ? 48000000 : 12000000;
}

unsigned long omap1_sossi_recalc(struct clk *clk)
{
	u32 div = omap_readl(MOD_CONF_CTRL_1);

	div = (div >> 17) & 0x7;
	div++;

	return clk->parent->rate / div;
}

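/*
 * Clocks flagged with CLOCK_IDLE_CONTROL have a bit in ARM_IDLECT1;
 * arm_idlect1_mask tracks which of those clocks are currently allowed
 * to idle.
 */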
static void omap1_clk_allow_idle(struct clk *clk)
{
	struct arm_idlect1_clk *iclk = (struct arm_idlect1_clk *)clk;

	if (!(clk->flags & CLOCK_IDLE_CONTROL))
		return;

	if (iclk->no_idle_count > 0 && !(--iclk->no_idle_count))
		arm_idlect1_mask |= 1 << iclk->idlect_shift;
}

static void omap1_clk_deny_idle(struct clk *clk)
{
	struct arm_idlect1_clk *iclk = (struct arm_idlect1_clk *)clk;

	if (!(clk->flags & CLOCK_IDLE_CONTROL))
		return;

	if (iclk->no_idle_count++ == 0)
		arm_idlect1_mask &= ~(1 << iclk->idlect_shift);
}

static __u16 verify_ckctl_value(__u16 newval)
{
	/*
	 * Enforce the hardware constraints between the 2-bit CKCTL divider
	 * exponents (divider = 2^exp, so a larger exponent means a slower
	 * clock):
	 *   - the DSPMMU divider must equal the DSP divider or twice it
	 *   - the TC divider must be >= the ARM and DSPMMU dividers
	 *   - the LCD and PER dividers must be >= the TC divider
	 */
	__u8 per_exp;
	__u8 lcd_exp;
	__u8 arm_exp;
	__u8 dsp_exp;
	__u8 tc_exp;
	__u8 dspmmu_exp;

	per_exp = (newval >> CKCTL_PERDIV_OFFSET) & 3;
	lcd_exp = (newval >> CKCTL_LCDDIV_OFFSET) & 3;
	arm_exp = (newval >> CKCTL_ARMDIV_OFFSET) & 3;
	dsp_exp = (newval >> CKCTL_DSPDIV_OFFSET) & 3;
	tc_exp = (newval >> CKCTL_TCDIV_OFFSET) & 3;
	dspmmu_exp = (newval >> CKCTL_DSPMMUDIV_OFFSET) & 3;

	if (dspmmu_exp < dsp_exp)
		dspmmu_exp = dsp_exp;
	if (dspmmu_exp > dsp_exp + 1)
		dspmmu_exp = dsp_exp + 1;
	if (tc_exp < arm_exp)
		tc_exp = arm_exp;
	if (tc_exp < dspmmu_exp)
		tc_exp = dspmmu_exp;
	if (tc_exp > lcd_exp)
		lcd_exp = tc_exp;
	if (tc_exp > per_exp)
		per_exp = tc_exp;

	newval &= 0xf000;
	newval |= per_exp << CKCTL_PERDIV_OFFSET;
	newval |= lcd_exp << CKCTL_LCDDIV_OFFSET;
	newval |= arm_exp << CKCTL_ARMDIV_OFFSET;
	newval |= dsp_exp << CKCTL_DSPDIV_OFFSET;
	newval |= tc_exp << CKCTL_TCDIV_OFFSET;
	newval |= dspmmu_exp << CKCTL_DSPMMUDIV_OFFSET;

	return newval;
}

static int calc_dsor_exp(struct clk *clk, unsigned long rate)
{
	/*
	 * Calculate the divisor exponent that gives the closest rate at or
	 * below the requested one from the parent clock.
	 *
	 * Note: if the target frequency is too low, this returns 4, which
	 * is not a valid exponent.  Callers must check for this and treat
	 * it as an error.
	 */
	unsigned long realrate;
	struct clk *parent;
	unsigned dsor_exp;

	parent = clk->parent;
	if (unlikely(parent == NULL))
		return -EIO;

	realrate = parent->rate;
	for (dsor_exp = 0; dsor_exp < 4; dsor_exp++) {
		if (realrate <= rate)
			break;

		realrate /= 2;
	}

	return dsor_exp;
}

unsigned long omap1_ckctl_recalc(struct clk *clk)
{
	/* The divisor is encoded in ARM_CKCTL as a 2-bit exponent */
	int dsor = 1 << (3 & (omap_readw(ARM_CKCTL) >> clk->rate_offset));

	return clk->parent->rate / dsor;
}

unsigned long omap1_ckctl_recalc_dsp_domain(struct clk *clk)
{
	int dsor;

	/*
	 * The DSP clock divisors are also encoded as 2-bit exponents, but
	 * DSP_CKCTL sits in the DSP domain, so api_ck must be enabled
	 * around the register access.
	 */
	omap1_clk_enable(api_ck_p);
	dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset));
	omap1_clk_disable(api_ck_p);

	return clk->parent->rate / dsor;
}

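/*
 * MPU rate selection: pick the highest omap1_rate_table entry that
 * matches the reference clock and does not exceed the requested rate.
 */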
int omap1_select_table_rate(struct clk *clk, unsigned long rate)
{
	/* Find the highest supported frequency <= rate and switch to it */
	struct mpu_rate *ptr;
	unsigned long ref_rate;

	ref_rate = ck_ref_p->rate;

	for (ptr = omap1_rate_table; ptr->rate; ptr++) {
		if (!(ptr->flags & cpu_mask))
			continue;

		if (ptr->xtal != ref_rate)
			continue;

		/* Can check only after the xtal frequency check */
		if (ptr->rate <= rate)
			break;
	}

	if (!ptr->rate)
		return -EINVAL;

	/*
	 * In most cases we should not need to reprogram the DPLL.
	 * Reprogramming the DPLL is tricky: it must be done from SRAM.
	 */
	omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val);

	ck_dpll1_p->rate = ptr->pll_rate;

	return 0;
}

int omap1_clk_set_rate_dsp_domain(struct clk *clk, unsigned long rate)
{
	int dsor_exp;
	u16 regval;

	dsor_exp = calc_dsor_exp(clk, rate);
	if (dsor_exp > 3)
		dsor_exp = -EINVAL;
	if (dsor_exp < 0)
		return dsor_exp;

	regval = __raw_readw(DSP_CKCTL);
	regval &= ~(3 << clk->rate_offset);
	regval |= dsor_exp << clk->rate_offset;
	__raw_writew(regval, DSP_CKCTL);
	clk->rate = clk->parent->rate / (1 << dsor_exp);

	return 0;
}

long omap1_clk_round_rate_ckctl_arm(struct clk *clk, unsigned long rate)
{
	int dsor_exp = calc_dsor_exp(clk, rate);

	if (dsor_exp < 0)
		return dsor_exp;
	if (dsor_exp > 3)
		dsor_exp = 3;
	return clk->parent->rate / (1 << dsor_exp);
}

int omap1_clk_set_rate_ckctl_arm(struct clk *clk, unsigned long rate)
{
	int dsor_exp;
	u16 regval;

	dsor_exp = calc_dsor_exp(clk, rate);
	if (dsor_exp > 3)
		dsor_exp = -EINVAL;
	if (dsor_exp < 0)
		return dsor_exp;

	regval = omap_readw(ARM_CKCTL);
	regval &= ~(3 << clk->rate_offset);
	regval |= dsor_exp << clk->rate_offset;
	regval = verify_ckctl_value(regval);
	omap_writew(regval, ARM_CKCTL);
	clk->rate = clk->parent->rate / (1 << dsor_exp);
	return 0;
}

long omap1_round_to_table_rate(struct clk *clk, unsigned long rate)
{
	/* Find the highest supported frequency <= rate */
	struct mpu_rate *ptr;
	long highest_rate;
	unsigned long ref_rate;

	ref_rate = ck_ref_p->rate;

	highest_rate = -EINVAL;

	for (ptr = omap1_rate_table; ptr->rate; ptr++) {
		if (!(ptr->flags & cpu_mask))
			continue;

		if (ptr->xtal != ref_rate)
			continue;

		highest_rate = ptr->rate;

		/* Can check only after the xtal frequency check */
		if (ptr->rate <= rate)
			break;
	}

	return highest_rate;
}

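/*
 * External clock divisor selection is not linear: the divisor ranges
 * from 2 to 96, and odd values above 8 cannot be programmed.
 */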
static unsigned calc_ext_dsor(unsigned long rate)
{
	unsigned dsor;

	for (dsor = 2; dsor < 96; ++dsor) {
		if ((dsor & 1) && dsor > 8)
			continue;
		if (rate >= 96000000 / dsor)
			break;
	}
	return dsor;
}

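/* Select the UART functional clock source: 12 MHz or 48 MHz */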
int omap1_set_uart_rate(struct clk *clk, unsigned long rate)
{
	unsigned int val;

	val = __raw_readl(clk->enable_reg);
	if (rate == 12000000)
		val &= ~(1 << clk->enable_bit);
	else if (rate == 48000000)
		val |= (1 << clk->enable_bit);
	else
		return -EINVAL;
	__raw_writel(val, clk->enable_reg);
	clk->rate = rate;

	return 0;
}

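/* External clock handling: rates are derived from a 96 MHz source via calc_ext_dsor() */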
int omap1_set_ext_clk_rate(struct clk *clk, unsigned long rate)
{
	unsigned dsor;
	__u16 ratio_bits;

	dsor = calc_ext_dsor(rate);
	clk->rate = 96000000 / dsor;
	if (dsor > 8)
		ratio_bits = ((dsor - 8) / 2 + 6) << 2;
	else
		ratio_bits = (dsor - 2) << 2;

	ratio_bits |= __raw_readw(clk->enable_reg) & ~0xfd;
	__raw_writew(ratio_bits, clk->enable_reg);

	return 0;
}

int omap1_set_sossi_rate(struct clk *clk, unsigned long rate)
{
	u32 l;
	int div;
	unsigned long p_rate;

	p_rate = clk->parent->rate;

	/* Round the divider up so the resulting rate does not exceed the
	 * requested one; bits 17..19 of MOD_CONF_CTRL_1 hold (div - 1).
	 */
	div = (p_rate + rate - 1) / rate;
	div--;
	if (div < 0 || div > 7)
		return -EINVAL;

	l = omap_readl(MOD_CONF_CTRL_1);
	l &= ~(7 << 17);
	l |= div << 17;
	omap_writel(l, MOD_CONF_CTRL_1);

	clk->rate = p_rate / (div + 1);

	return 0;
}

long omap1_round_ext_clk_rate(struct clk *clk, unsigned long rate)
{
	return 96000000 / calc_ext_dsor(rate);
}

void omap1_init_ext_clk(struct clk *clk)
{
	unsigned dsor;
	__u16 ratio_bits;

	/* Clear bit 0 and derive the current rate from the programmed
	 * divisor (96 MHz / dsor).
	 */
	ratio_bits = __raw_readw(clk->enable_reg) & ~1;
	__raw_writew(ratio_bits, clk->enable_reg);

	ratio_bits = (ratio_bits & 0xfc) >> 2;
	if (ratio_bits > 6)
		dsor = (ratio_bits - 6) * 2 + 8;
	else
		dsor = ratio_bits + 2;

	clk->rate = 96000000 / dsor;
}

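/*
 * Use-counted enable/disable: a clock's parent is enabled before the
 * clock itself (optionally denying idle on it), and released again when
 * the last user goes away.
 */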
int omap1_clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = omap1_clk_enable(clk->parent);
			if (ret)
				goto err;

			if (clk->flags & CLOCK_NO_IDLE_PARENT)
				omap1_clk_deny_idle(clk->parent);
		}

		ret = clk->ops->enable(clk);
		if (ret) {
			if (clk->parent)
				omap1_clk_disable(clk->parent);
			goto err;
		}
	}
	return ret;

err:
	clk->usecount--;
	return ret;
}

void omap1_clk_disable(struct clk *clk)
{
	if (clk->usecount > 0 && !(--clk->usecount)) {
		clk->ops->disable(clk);
		if (likely(clk->parent)) {
			omap1_clk_disable(clk->parent);
			if (clk->flags & CLOCK_NO_IDLE_PARENT)
				omap1_clk_allow_idle(clk->parent);
		}
	}
}

static int omap1_clk_enable_generic(struct clk *clk)
{
	__u16 regval16;
	__u32 regval32;

	if (unlikely(clk->enable_reg == NULL)) {
		printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
		       clk->name);
		return -EINVAL;
	}

	if (clk->flags & ENABLE_REG_32BIT) {
		regval32 = __raw_readl(clk->enable_reg);
		regval32 |= (1 << clk->enable_bit);
		__raw_writel(regval32, clk->enable_reg);
	} else {
		regval16 = __raw_readw(clk->enable_reg);
		regval16 |= (1 << clk->enable_bit);
		__raw_writew(regval16, clk->enable_reg);
	}

	return 0;
}

static void omap1_clk_disable_generic(struct clk *clk)
{
	__u16 regval16;
	__u32 regval32;

	if (clk->enable_reg == NULL)
		return;

	if (clk->flags & ENABLE_REG_32BIT) {
		regval32 = __raw_readl(clk->enable_reg);
		regval32 &= ~(1 << clk->enable_bit);
		__raw_writel(regval32, clk->enable_reg);
	} else {
		regval16 = __raw_readw(clk->enable_reg);
		regval16 &= ~(1 << clk->enable_bit);
		__raw_writew(regval16, clk->enable_reg);
	}
}

const struct clkops clkops_generic = {
	.enable = omap1_clk_enable_generic,
	.disable = omap1_clk_disable_generic,
};

static int omap1_clk_enable_dsp_domain(struct clk *clk)
{
	int retval;

	retval = omap1_clk_enable(api_ck_p);
	if (!retval) {
		retval = omap1_clk_enable_generic(clk);
		omap1_clk_disable(api_ck_p);
	}

	return retval;
}

static void omap1_clk_disable_dsp_domain(struct clk *clk)
{
	if (omap1_clk_enable(api_ck_p) == 0) {
		omap1_clk_disable_generic(clk);
		omap1_clk_disable(api_ck_p);
	}
}

const struct clkops clkops_dspck = {
	.enable = omap1_clk_enable_dsp_domain,
	.disable = omap1_clk_disable_dsp_domain,
};

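/*
 * 16xx UART functional clocks: besides gating the clock, these also
 * program the idle acknowledgement mode in the UART SYSC register.
 */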
static int omap1_clk_enable_uart_functional_16xx(struct clk *clk)
{
	int ret;
	struct uart_clk *uclk;

	ret = omap1_clk_enable_generic(clk);
	if (ret == 0) {
		/* Set smart idle acknowledgement mode */
		uclk = (struct uart_clk *)clk;
		omap_writeb((omap_readb(uclk->sysc_addr) & ~0x10) | 8,
			    uclk->sysc_addr);
	}

	return ret;
}

static void omap1_clk_disable_uart_functional_16xx(struct clk *clk)
{
	struct uart_clk *uclk;

	/* Set force idle acknowledgement mode */
	uclk = (struct uart_clk *)clk;
	omap_writeb((omap_readb(uclk->sysc_addr) & ~0x18), uclk->sysc_addr);

	omap1_clk_disable_generic(clk);
}

const struct clkops clkops_uart_16xx = {
	.enable = omap1_clk_enable_uart_functional_16xx,
	.disable = omap1_clk_disable_uart_functional_16xx,
};

long omap1_clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (clk->round_rate != NULL)
		return clk->round_rate(clk, rate);

	return clk->rate;
}

int omap1_clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret = -EINVAL;

	if (clk->set_rate)
		ret = clk->set_rate(clk, rate);
	return ret;
}

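/*
 * Reset handling for clocks left enabled at boot (CONFIG_OMAP_RESET_CLOCKS)
 */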

#ifdef CONFIG_OMAP_RESET_CLOCKS

void omap1_clk_disable_unused(struct clk *clk)
{
	__u32 regval32;

	/* Clocks in the DSP domain need api_ck; assume the bootloader
	 * has not left any DSP clocks enabled.
	 */
	if (clk->enable_reg == DSP_IDLECT2) {
		pr_info("Skipping reset check for DSP domain clock \"%s\"\n",
			clk->name);
		return;
	}

	/* Is the clock already disabled? */
	if (clk->flags & ENABLE_REG_32BIT)
		regval32 = __raw_readl(clk->enable_reg);
	else
		regval32 = __raw_readw(clk->enable_reg);

	if ((regval32 & (1 << clk->enable_bit)) == 0)
		return;

	printk(KERN_INFO "Disabling unused clock \"%s\"... ", clk->name);
	clk->ops->disable(clk);
	printk(" done\n");
}

#endif

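/*
 * Standard clock API (linux/clk.h) entry points, serialized by clockfw_lock
 */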
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap1_clk_enable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (clk->usecount == 0) {
		pr_err("Trying to disable clock %s with 0 usecount\n",
		       clk->name);
		WARN_ON(1);
		goto out;
	}

	omap1_clk_disable(clk);

out:
	spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);

unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long flags;
	unsigned long ret;

	if (clk == NULL || IS_ERR(clk))
		return 0;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = clk->rate;
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_get_rate);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	long ret;

	if (clk == NULL || IS_ERR(clk))
		return 0;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap1_clk_round_rate(clk, rate);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_round_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (clk == NULL || IS_ERR(clk))
		return ret;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap1_clk_set_rate(clk, rate);
	if (ret == 0)
		propagate_rate(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_set_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	WARN_ONCE(1, "clk_set_parent() not implemented for OMAP1\n");

	return -EINVAL;
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL(clk_get_parent);

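/* Used by clocks that simply follow their parent's rate */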
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent->rate;
}

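/* Used by clocks that divide their parent's rate by a fixed divisor */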
unsigned long omap_fixed_divisor_recalc(struct clk *clk)
{
	WARN_ON(!clk->fixed_div);

	return clk->parent->rate / clk->fixed_div;
}

void clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;
}

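/* Propagate a rate change to all descendants of tclk */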
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->recalc)
			clkp->rate = clkp->recalc(clkp);
		propagate_rate(clkp);
	}
}

static LIST_HEAD(root_clks);

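/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which if the
 * clock's .recalc is set correctly, should also propagate their rates.
 */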
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->recalc)
			clkp->rate = clkp->recalc(clkp);
		propagate_rate(clkp);
	}
}

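/**
 * clk_preinit - initialize any fields in the struct clk before clk init
 * @clk: struct clk * to initialize
 *
 * Initialize any struct clk fields needed before normal clk initialization
 * can run.  No return value.
 */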
void clk_preinit(struct clk *clk)
{
	INIT_LIST_HEAD(&clk->children);
}

int clk_register(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/* Trap out already registered clocks */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clocks_mutex);
	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clocks);
	if (clk->init)
		clk->init(clk);
	mutex_unlock(&clocks_mutex);

	return 0;
}
EXPORT_SYMBOL(clk_register);

void clk_unregister(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return;

	mutex_lock(&clocks_mutex);
	list_del(&clk->sibling);
	list_del(&clk->node);
	mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);

void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clocks, node)
		if (clkp->flags & ENABLE_ON_INIT)
			clk_enable(clkp);
}

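/**
 * omap_clk_get_by_name - locate an OMAP struct clk by its name
 * @name: name of the struct clk to locate
 *
 * Returns a pointer to the struct clk if found, or NULL otherwise.
 */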
struct clk *omap_clk_get_by_name(const char *name)
{
	struct clk *c;
	struct clk *ret = NULL;

	mutex_lock(&clocks_mutex);

	list_for_each_entry(c, &clocks, node) {
		if (!strcmp(c->name, name)) {
			ret = c;
			break;
		}
	}

	mutex_unlock(&clocks_mutex);

	return ret;
}

int omap_clk_enable_autoidle_all(void)
{
	struct clk *c;
	unsigned long flags;

	spin_lock_irqsave(&clockfw_lock, flags);

	list_for_each_entry(c, &clocks, node)
		if (c->ops->allow_idle)
			c->ops->allow_idle(c);

	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}

int omap_clk_disable_autoidle_all(void)
{
	struct clk *c;
	unsigned long flags;

	spin_lock_irqsave(&clockfw_lock, flags);

	list_for_each_entry(c, &clocks, node)
		if (c->ops->deny_idle)
			c->ops->deny_idle(c);

	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}

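/*
 * Low-level clkops helpers
 */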
static int clkll_enable_null(struct clk *clk)
{
	return 0;
}

static void clkll_disable_null(struct clk *clk)
{
}

const struct clkops clkops_null = {
	.enable = clkll_enable_null,
	.disable = clkll_disable_null,
};

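/*
 * Dummy clock: has no controls, but lets clock aliases resolve to a
 * valid struct clk where one is expected.
 */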
struct clk dummy_ck = {
	.name = "dummy",
	.ops = &clkops_null,
};

#ifdef CONFIG_OMAP_RESET_CLOCKS
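/*
 * Disable any clocks that were left enabled but have no users
 */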
static int __init clk_disable_unused(void)
{
	struct clk *ck;
	unsigned long flags;

	pr_info("clock: disabling unused clocks to save power\n");

	spin_lock_irqsave(&clockfw_lock, flags);
	list_for_each_entry(ck, &clocks, node) {
		if (ck->ops == &clkops_null)
			continue;

		if (ck->usecount > 0 || !ck->enable_reg)
			continue;

		omap1_clk_disable_unused(ck);
	}
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
late_initcall(clk_disable_unused);
late_initcall(omap_clk_enable_autoidle_all);
#endif

#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
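/*
 * debugfs support to trace clock tree hierarchy and attributes
 */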
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *clk_debugfs_root;

static int clk_dbg_show_summary(struct seq_file *s, void *unused)
{
	struct clk *c;
	struct clk *pa;

	mutex_lock(&clocks_mutex);
	seq_printf(s, "%-30s %-30s %-10s %s\n",
		   "clock-name", "parent-name", "rate", "use-count");

	list_for_each_entry(c, &clocks, node) {
		pa = c->parent;
		seq_printf(s, "%-30s %-30s %-10lu %d\n",
			   c->name, pa ? pa->name : "none", c->rate,
			   c->usecount);
	}
	mutex_unlock(&clocks_mutex);

	return 0;
}

static int clk_dbg_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_dbg_show_summary, inode->i_private);
}

static const struct file_operations debug_clock_fops = {
	.open = clk_dbg_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int clk_debugfs_register_one(struct clk *c)
{
	int err;
	struct dentry *d;
	struct clk *pa = c->parent;

	d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dent = d;

	d = debugfs_create_u8("usecount", S_IRUGO, c->dent, &c->usecount);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_ulong("rate", S_IRUGO, c->dent, &c->rate);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_x8("flags", S_IRUGO, c->dent, &c->flags);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	return 0;

err_out:
	debugfs_remove_recursive(c->dent);
	return err;
}

static int clk_debugfs_register(struct clk *c)
{
	int err;
	struct clk *pa = c->parent;

	if (pa && !pa->dent) {
		err = clk_debugfs_register(pa);
		if (err)
			return err;
	}

	if (!c->dent) {
		err = clk_debugfs_register_one(c);
		if (err)
			return err;
	}
	return 0;
}

static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	list_for_each_entry(c, &clocks, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}

	d = debugfs_create_file("summary", S_IRUGO,
				d, NULL, &debug_clock_fops);
	if (!d)
		return -ENOMEM;

	return 0;
err_out:
	debugfs_remove_recursive(clk_debugfs_root);
	return err;
}
late_initcall(clk_debugfs_init);

#endif