/*
 * Standard functionality for the common clock framework.  See
 * Documentation/clk.txt for more information.
 */
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/clkdev.h>

#include "clk.h"

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/***    private data structures    ***/
struct clk_core {
	const char *name;
	const struct clk_ops *ops;
	struct clk_hw *hw;
	struct module *owner;
	struct clk_core *parent;
	const char **parent_names;
	struct clk_core **parents;
	u8 num_parents;
	u8 new_parent_index;
	unsigned long rate;
	unsigned long req_rate;
	unsigned long new_rate;
	struct clk_core *new_parent;
	struct clk_core *new_child;
	unsigned long flags;
	bool orphan;
	unsigned int enable_count;
	unsigned int prepare_count;
	unsigned long min_rate;
	unsigned long max_rate;
	unsigned long accuracy;
	int phase;
	struct hlist_head children;
	struct hlist_node child_node;
	struct hlist_head clks;
	unsigned int notifier_count;
#ifdef CONFIG_DEBUG_FS
	struct dentry *dentry;
	struct hlist_node debug_node;
#endif
	struct kref ref;
};

#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

struct clk {
	struct clk_core *core;
	const char *dev_id;
	const char *con_id;
	unsigned long min_rate;
	unsigned long max_rate;
	struct hlist_node clks_node;
};

/***           locking             ***/
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}

static unsigned long clk_enable_lock(void)
	__acquires(enable_lock)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			__acquire(enable_lock);
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
	__releases(enable_lock)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt) {
		__release(enable_lock);
		return;
	}
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}

static bool clk_core_is_prepared(struct clk_core *core)
{
	/*
	 * .is_prepared is optional for clocks that can prepare, so fall
	 * back to the software usage counter if it is missing
	 */
	if (!core->ops->is_prepared)
		return core->prepare_count;

	return core->ops->is_prepared(core->hw);
}

static bool clk_core_is_enabled(struct clk_core *core)
{
	/*
	 * .is_enabled is only mandatory for gate clocks, so fall back to
	 * the software usage counter if it is missing
	 */
	if (!core->ops->is_enabled)
		return core->enable_count;

	return core->ops->is_enabled(core->hw);
}

/***  helper functions   ***/
const char *__clk_get_name(const struct clk *clk)
{
	return !clk ? NULL : clk->core->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

const char *clk_hw_get_name(const struct clk_hw *hw)
{
	return hw->core->name;
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->core->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);

unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
{
	return hw->core->num_parents;
}
EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);

struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
{
	return hw->core->parent ? hw->core->parent->hw : NULL;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);

/*
 * Count the clocks sitting directly below the clock named @name:
 * clk_show_subtree() increments @sibling once for every node it visits
 * at depth one of the walk.
 */
static unsigned int sibling;

static void clk_show_subtree(struct clk_core *c, int level)
{
	struct clk_core *child;

	if (!c)
		return;

	if (level == 1)
		sibling++;

	hlist_for_each_entry(child, &c->children, child_node)
		clk_show_subtree(child, level + 1);
}

unsigned int clk_get_children(char *name)
{
	struct clk_core *core;
	struct clk *pclk = __clk_lookup(name);

	/* a failed lookup must not be dereferenced */
	if (!pclk)
		return 0;

	sibling = 0;

	core = pclk->core;
	clk_show_subtree(core, 0);
	return sibling;
}

static struct clk_core *__clk_lookup_subtree(const char *name,
					     struct clk_core *core)
{
	struct clk_core *child;
	struct clk_core *ret;

	if (!strcmp(core->name, name))
		return core;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_lookup(const char *name)
{
	struct clk_core *root_clk;
	struct clk_core *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
						     u8 index)
{
	if (!core || index >= core->num_parents)
		return NULL;

	if (!core->parents[index])
		core->parents[index] =
				clk_core_lookup(core->parent_names[index]);

	return core->parents[index];
}

struct clk_hw *
clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
{
	struct clk_core *parent;

	parent = clk_core_get_parent_by_index(hw->core, index);

	return !parent ? NULL : parent->hw;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);

unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->core->enable_count;
}

static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
{
	unsigned long ret;

	if (!core) {
		ret = 0;
		goto out;
	}

	ret = core->rate;

	if (!core->num_parents)
		goto out;

	if (!core->parent)
		ret = 0;

out:
	return ret;
}

unsigned long clk_hw_get_rate(const struct clk_hw *hw)
{
	return clk_core_get_rate_nolock(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_rate);

static unsigned long __clk_get_accuracy(struct clk_core *core)
{
	if (!core)
		return 0;

	return core->accuracy;
}

unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? 0 : clk->core->flags;
}
EXPORT_SYMBOL_GPL(__clk_get_flags);

unsigned long clk_hw_get_flags(const struct clk_hw *hw)
{
	return hw->core->flags;
}
EXPORT_SYMBOL_GPL(clk_hw_get_flags);

bool clk_hw_is_prepared(const struct clk_hw *hw)
{
	return clk_core_is_prepared(hw->core);
}

bool clk_hw_is_enabled(const struct clk_hw *hw)
{
	return clk_core_is_enabled(hw->core);
}

bool __clk_is_enabled(struct clk *clk)
{
	if (!clk)
		return false;

	return clk_core_is_enabled(clk->core);
}
EXPORT_SYMBOL_GPL(__clk_is_enabled);

static bool mux_is_better_rate(unsigned long rate, unsigned long now,
			       unsigned long best, unsigned long flags)
{
	if (flags & CLK_MUX_ROUND_CLOSEST)
		return abs(now - rate) < abs(best - rate);

	return now <= rate && now > best;
}

static int
clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req,
			     unsigned long flags)
{
	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
	int i, num_parents, ret;
	unsigned long best = 0;
	struct clk_rate_request parent_req = *req;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (core->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = core->parent;
		if (core->flags & CLK_SET_RATE_PARENT) {
			ret = __clk_determine_rate(parent ? parent->hw : NULL,
						   &parent_req);
			if (ret)
				return ret;

			best = parent_req.rate;
		} else if (parent) {
			best = clk_core_get_rate_nolock(parent);
		} else {
			best = clk_core_get_rate_nolock(core);
		}

		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = core->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_core_get_parent_by_index(core, i);
		if (!parent)
			continue;

		if (core->flags & CLK_SET_RATE_PARENT) {
			parent_req = *req;
			ret = __clk_determine_rate(parent->hw, &parent_req);
			if (ret)
				continue;
		} else {
			parent_req.rate = clk_core_get_rate_nolock(parent);
		}

		if (mux_is_better_rate(req->rate, parent_req.rate,
				       best, flags)) {
			best_parent = parent;
			best = parent_req.rate;
		}
	}

	if (!best_parent)
		return -EINVAL;

out:
	if (best_parent)
		req->best_parent_hw = best_parent->hw;
	req->best_parent_rate = best;
	req->rate = best;

	return 0;
}

struct clk *__clk_lookup(const char *name)
{
	struct clk_core *core = clk_core_lookup(name);

	return !core ? NULL : core->hw->clk;
}

static void clk_core_get_boundaries(struct clk_core *core,
				    unsigned long *min_rate,
				    unsigned long *max_rate)
{
	struct clk *clk_user;

	*min_rate = core->min_rate;
	*max_rate = core->max_rate;

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*min_rate = max(*min_rate, clk_user->min_rate);

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*max_rate = min(*max_rate, clk_user->max_rate);
}

void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
			   unsigned long max_rate)
{
	hw->core->min_rate = min_rate;
	hw->core->max_rate = max_rate;
}
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);

/*
 * Helper for finding best parent to provide a given frequency.  This can
 * be used directly as a determine_rate callback (e.g. for a mux), or as
 * a building block for a more complex determine_rate implementation.
 */
int __clk_mux_determine_rate(struct clk_hw *hw,
			     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);

int __clk_mux_determine_rate_closest(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);

/***        clk api        ***/

static void clk_core_unprepare(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN_ON(core->prepare_count == 0))
		return;

	if (WARN_ON(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL))
		return;

	if (--core->prepare_count > 0)
		return;

	WARN_ON(core->enable_count > 0);

	trace_clk_unprepare(core);

	if (core->ops->unprepare)
		core->ops->unprepare(core->hw);

	trace_clk_unprepare_complete(core);
	clk_core_unprepare(core->parent);
}

static void clk_core_unprepare_lock(struct clk_core *core)
{
	clk_prepare_lock();
	clk_core_unprepare(core);
	clk_prepare_unlock();
}
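
/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a
 * clk if the operation may sleep.  One example is a clk which is accessed
 * over I2c.  In the complex case a clk gate operation may require a fast
 * and a slow part.  It is this reason that clk_unprepare and clk_disable
 * are not mutually exclusive.  In fact clk_disable must be called before
 * clk_unprepare.
 */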
void clk_unprepare(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_unprepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

static int clk_core_prepare(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (core->prepare_count == 0) {
		ret = clk_core_prepare(core->parent);
		if (ret)
			return ret;

		trace_clk_prepare(core);

		if (core->ops->prepare)
			ret = core->ops->prepare(core->hw);

		trace_clk_prepare_complete(core);

		if (ret) {
			clk_core_unprepare(core->parent);
			return ret;
		}
	}

	core->prepare_count++;

	return 0;
}

static int clk_core_prepare_lock(struct clk_core *core)
{
	int ret;

	clk_prepare_lock();
	ret = clk_core_prepare(core);
	clk_prepare_unlock();

	return ret;
}
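
/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a
 * simple case, clk_prepare can be used instead of clk_enable to ungate a
 * clk if the operation may sleep.  One example is a clk which is accessed
 * over I2c.  In the complex case a clk ungate operation may require a fast
 * and a slow part.  It is this reason that clk_prepare and clk_enable are
 * not mutually exclusive.  In fact clk_prepare must be called before
 * clk_enable.  Returns 0 on success, -EERROR otherwise.
 */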
int clk_prepare(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_prepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_prepare);

static void clk_core_disable(struct clk_core *core)
{
	lockdep_assert_held(&enable_lock);

	if (!core)
		return;

	if (WARN_ON(core->enable_count == 0))
		return;

	if (WARN_ON(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL))
		return;

	if (--core->enable_count > 0)
		return;

	trace_clk_disable_rcuidle(core);

	if (core->ops->disable)
		core->ops->disable(core->hw);

	trace_clk_disable_complete_rcuidle(core);

	clk_core_disable(core->parent);
}

static void clk_core_disable_lock(struct clk_core *core)
{
	unsigned long flags;

	flags = clk_enable_lock();
	clk_core_disable(core);
	clk_enable_unlock(flags);
}
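
/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.
 * In a simple case, clk_disable can be used instead of clk_unprepare to
 * gate a clk if the operation is fast and will never sleep.  One example
 * is a SoC-internal clk which is controlled via simple register writes.
 * In the complex case a clk gate operation may require a fast and a slow
 * part.  It is this reason that clk_unprepare and clk_disable are not
 * mutually exclusive.  In fact clk_disable must be called before
 * clk_unprepare.
 */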
void clk_disable(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_disable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int clk_core_enable(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&enable_lock);

	if (!core)
		return 0;

	if (WARN_ON(core->prepare_count == 0))
		return -ESHUTDOWN;

	if (core->enable_count == 0) {
		ret = clk_core_enable(core->parent);

		if (ret)
			return ret;

		trace_clk_enable_rcuidle(core);

		if (core->ops->enable)
			ret = core->ops->enable(core->hw);

		trace_clk_enable_complete_rcuidle(core);

		if (ret) {
			clk_core_disable(core->parent);
			return ret;
		}
	}

	core->enable_count++;
	return 0;
}

static int clk_core_enable_lock(struct clk_core *core)
{
	unsigned long flags;
	int ret;

	flags = clk_enable_lock();
	ret = clk_core_enable(core);
	clk_enable_unlock(flags);

	return ret;
}
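
/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In
 * a simple case, clk_enable can be used instead of clk_prepare to ungate
 * a clk if the operation will never sleep.  One example is a SoC-internal
 * clk which is controlled via simple register writes.  In the complex
 * case a clk ungate operation may require a fast and a slow part.  It is
 * this reason that clk_enable and clk_prepare are not mutually exclusive.
 * In fact clk_prepare must be called before clk_enable.  Returns 0 on
 * success, -EERROR otherwise.
 */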
int clk_enable(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_enable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_enable);

static int clk_core_prepare_enable(struct clk_core *core)
{
	int ret;

	ret = clk_core_prepare_lock(core);
	if (ret)
		return ret;

	ret = clk_core_enable_lock(core);
	if (ret)
		clk_core_unprepare_lock(core);

	return ret;
}

static void clk_core_disable_unprepare(struct clk_core *core)
{
	clk_core_disable_lock(core);
	clk_core_unprepare_lock(core);
}

static void clk_unprepare_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (core->prepare_count)
		return;

	if (core->flags & CLK_IGNORE_UNUSED)
		return;

	if (clk_core_is_prepared(core)) {
		trace_clk_unprepare(core);
		if (core->ops->unprepare_unused)
			core->ops->unprepare_unused(core->hw);
		else if (core->ops->unprepare)
			core->ops->unprepare(core->hw);
		trace_clk_unprepare_complete(core);
	}
}

static void clk_disable_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;
	unsigned long flags;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_disable_unused_subtree(child);

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(core->parent);

	flags = clk_enable_lock();

	if (core->enable_count)
		goto unlock_out;

	if (core->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence, so prefer a dedicated .disable_unused callback when
	 * the driver provides one and fall back to .disable otherwise
	 */
	if (clk_core_is_enabled(core)) {
		trace_clk_disable(core);
		if (core->ops->disable_unused)
			core->ops->disable_unused(core->hw);
		else if (core->ops->disable)
			core->ops->disable(core->hw);
		trace_clk_disable_complete(core);
	}

unlock_out:
	clk_enable_unlock(flags);
	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(core->parent);
}

static bool clk_ignore_unused;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);

static int clk_disable_unused(void)
{
	struct clk_core *core;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

	clk_prepare_lock();

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(core);

	clk_prepare_unlock();

	return 0;
}
late_initcall_sync(clk_disable_unused);

static int clk_core_round_rate_nolock(struct clk_core *core,
				      struct clk_rate_request *req)
{
	struct clk_core *parent;
	long rate;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	parent = core->parent;
	if (parent) {
		req->best_parent_hw = parent->hw;
		req->best_parent_rate = parent->rate;
	} else {
		req->best_parent_hw = NULL;
		req->best_parent_rate = 0;
	}

	if (core->ops->determine_rate) {
		return core->ops->determine_rate(core->hw, req);
	} else if (core->ops->round_rate) {
		rate = core->ops->round_rate(core->hw, req->rate,
					     &req->best_parent_rate);
		if (rate < 0)
			return rate;

		req->rate = rate;
	} else if (core->flags & CLK_SET_RATE_PARENT) {
		return clk_core_round_rate_nolock(parent, req);
	} else {
		req->rate = core->rate;
	}

	return 0;
}
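
/**
 * __clk_determine_rate - get the closest rate actually supported by a clock
 * @hw: determine the rate of this clock
 * @req: target rate request
 *
 * Useful for clk_ops such as .set_rate and .determine_rate.
 */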
int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	if (!hw) {
		req->rate = 0;
		return 0;
	}

	return clk_core_round_rate_nolock(hw->core, req);
}
EXPORT_SYMBOL_GPL(__clk_determine_rate);

unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
{
	int ret;
	struct clk_rate_request req;

	clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(hw->core, &req);
	if (ret)
		return 0;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_hw_round_rate);
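
/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can
 * actually use which is then returned.  If clk doesn't support
 * round_rate operation then the parent rate is returned.
 */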
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	struct clk_rate_request req;
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();

	clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(clk->core, &req);
	clk_prepare_unlock();

	if (ret)
		return ret;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_round_rate);

/**
 * __clk_notify - call clk notifier chain
 * @core: clk that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk_core *core, unsigned long msg,
			unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk->core == core) {
			cnd.clk = cn->clk;
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
					&cnd);
			if (ret & NOTIFY_STOP_MASK)
				return ret;
		}
	}

	return ret;
}

/**
 * __clk_recalc_accuracies
 * @core: first clk in the subtree
 *
 * Walks the subtree of clks starting with clk and recalculates accuracies as
 * it goes.  Note that if a clk does not implement the .recalc_accuracy
 * callback then it is assumed that the clock will take on the accuracy of
 * its parent.
 */
static void __clk_recalc_accuracies(struct clk_core *core)
{
	unsigned long parent_accuracy = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	if (core->parent)
		parent_accuracy = core->parent->accuracy;

	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
							    parent_accuracy);
	else
		core->accuracy = parent_accuracy;

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_accuracies(child);
}

static long clk_core_get_accuracy(struct clk_core *core)
{
	unsigned long accuracy;

	clk_prepare_lock();
	if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
		__clk_recalc_accuracies(core);

	accuracy = __clk_get_accuracy(core);
	clk_prepare_unlock();

	return accuracy;
}
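
/**
 * clk_get_accuracy - return the accuracy of clk
 * @clk: the clk whose accuracy is being returned
 *
 * Simply returns the cached accuracy of the clk, unless the
 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalculation is
 * issued.  If clk is NULL then returns 0.
 */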
long clk_get_accuracy(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_accuracy(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);

static unsigned long clk_recalc(struct clk_core *core,
				unsigned long parent_rate)
{
	if (core->ops->recalc_rate)
		return core->ops->recalc_rate(core->hw, parent_rate);
	return parent_rate;
}

/**
 * __clk_recalc_rates
 * @core: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback
 * then it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 */
static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	old_rate = core->rate;

	if (core->parent)
		parent_rate = core->parent->rate;

	core->rate = clk_recalc(core, parent_rate);

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for
	 * POST_RATE_CHANGE & ABORT_RATE_CHANGE notifiers
	 */
	if (core->notifier_count && msg)
		__clk_notify(core, msg, old_rate, core->rate);

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_rates(child, msg);
}

static unsigned long clk_core_get_rate(struct clk_core *core)
{
	unsigned long rate;

	clk_prepare_lock();

	if (core && (core->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(core, 0);

	rate = clk_core_get_rate_nolock(core);
	clk_prepare_unlock();

	return rate;
}
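
/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless the
 * CLK_GET_RATE_NOCACHE flag is set, which means a recalc_rate will be
 * issued.  If clk is NULL then returns 0.
 */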
unsigned long clk_get_rate(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_rate(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_rate);

static int clk_fetch_parent_index(struct clk_core *core,
				  struct clk_core *parent)
{
	int i;

	if (!parent)
		return -EINVAL;

	for (i = 0; i < core->num_parents; i++)
		if (clk_core_get_parent_by_index(core, i) == parent)
			return i;

	return -EINVAL;
}

/*
 * Update the orphan status of @core and all its children.
 */
static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
{
	struct clk_core *child;

	core->orphan = is_orphan;

	hlist_for_each_entry(child, &core->children, child_node)
		clk_core_update_orphan_status(child, is_orphan);
}

static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
{
	bool was_orphan = core->orphan;

	hlist_del(&core->child_node);

	if (new_parent) {
		bool becomes_orphan = new_parent->orphan;

		/* avoid duplicate POST_RATE_CHANGE notifications */
		if (new_parent->new_child == core)
			new_parent->new_child = NULL;

		hlist_add_head(&core->child_node, &new_parent->children);

		if (was_orphan != becomes_orphan)
			clk_core_update_orphan_status(core, becomes_orphan);
	} else {
		hlist_add_head(&core->child_node, &clk_orphan_list);
		if (!was_orphan)
			clk_core_update_orphan_status(core, true);
	}

	core->parent = new_parent;
}

static struct clk_core *__clk_set_parent_before(struct clk_core *core,
						struct clk_core *parent)
{
	unsigned long flags;
	struct clk_core *old_parent = core->parent;

	/*
	 * Migrate prepare state between parents and prevent a race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible to see.
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on.  This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect
	 * to hardware and software states.
	 *
	 * See also: comment for clk_set_parent() below.
	 */

	/* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
	if (core->flags & CLK_OPS_PARENT_ENABLE) {
		clk_core_prepare_enable(old_parent);
		clk_core_prepare_enable(parent);
	}

	/* migrate prepare count if > 0 */
	if (core->prepare_count) {
		clk_core_prepare_enable(parent);
		clk_core_enable_lock(core);
	}

	/* update the clk tree topology */
	flags = clk_enable_lock();
	clk_reparent(core, parent);
	clk_enable_unlock(flags);

	return old_parent;
}

static void __clk_set_parent_after(struct clk_core *core,
				   struct clk_core *parent,
				   struct clk_core *old_parent)
{
	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
	if (core->prepare_count) {
		clk_core_disable_lock(core);
		clk_core_disable_unprepare(old_parent);
	}

	/* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
	if (core->flags & CLK_OPS_PARENT_ENABLE) {
		clk_core_disable_unprepare(parent);
		clk_core_disable_unprepare(old_parent);
	}
}

static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
			    u8 p_index)
{
	unsigned long flags;
	int ret = 0;
	struct clk_core *old_parent;

	old_parent = __clk_set_parent_before(core, parent);

	trace_clk_set_parent(core, parent);

	/* change clock input source */
	if (parent && core->ops->set_parent)
		ret = core->ops->set_parent(core->hw, p_index);

	trace_clk_set_parent_complete(core, parent);

	if (ret) {
		flags = clk_enable_lock();
		clk_reparent(core, old_parent);
		clk_enable_unlock(flags);
		__clk_set_parent_after(core, old_parent, parent);

		return ret;
	}

	__clk_set_parent_after(core, parent, old_parent);

	return 0;
}

/**
 * __clk_speculate_rates
 * @core: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does
 * not implement the .recalc_rate callback then it is assumed that the
 * clock will take on the rate of its parent.
 */
static int __clk_speculate_rates(struct clk_core *core,
				 unsigned long parent_rate)
{
	struct clk_core *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	lockdep_assert_held(&prepare_lock);

	new_rate = clk_recalc(core, parent_rate);

	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
	if (core->notifier_count)
		ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);

	if (ret & NOTIFY_STOP_MASK) {
		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
				__func__, core->name, ret);
		goto out;
	}

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret & NOTIFY_STOP_MASK)
			break;
	}

out:
	return ret;
}

static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
			     struct clk_core *new_parent, u8 p_index)
{
	struct clk_core *child;

	core->new_rate = new_rate;
	core->new_parent = new_parent;
	core->new_parent_index = p_index;

	/* include clk in new parent's PRE_RATE_CHANGE notifications */
	core->new_child = NULL;
	if (new_parent && new_parent != core->parent)
		new_parent->new_child = core;

	hlist_for_each_entry(child, &core->children, child_node) {
		child->new_rate = clk_recalc(child, new_rate);
		clk_calc_subtree(child, child->new_rate, NULL, 0);
	}
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk_core *clk_calc_new_rates(struct clk_core *core,
					   unsigned long rate)
{
	struct clk_core *top = core;
	struct clk_core *old_parent, *parent;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;
	unsigned long min_rate;
	unsigned long max_rate;
	int p_index = 0;
	long ret;

	/* sanity */
	if (IS_ERR_OR_NULL(core))
		return NULL;

	/* save parent rate, if it exists */
	parent = old_parent = core->parent;
	if (parent)
		best_parent_rate = parent->rate;

	clk_core_get_boundaries(core, &min_rate, &max_rate);

	/* find the closest rate and parent clk/rate */
	if (core->ops->determine_rate) {
		struct clk_rate_request req;

		req.rate = rate;
		req.min_rate = min_rate;
		req.max_rate = max_rate;
		if (parent) {
			req.best_parent_hw = parent->hw;
			req.best_parent_rate = parent->rate;
		} else {
			req.best_parent_hw = NULL;
			req.best_parent_rate = 0;
		}

		ret = core->ops->determine_rate(core->hw, &req);
		if (ret < 0)
			return NULL;

		best_parent_rate = req.best_parent_rate;
		new_rate = req.rate;
		parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
	} else if (core->ops->round_rate) {
		ret = core->ops->round_rate(core->hw, rate,
					    &best_parent_rate);
		if (ret < 0)
			return NULL;

		new_rate = ret;
		if (new_rate < min_rate || new_rate > max_rate)
			return NULL;
	} else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
		/* pass-through clock without adjustable parent */
		core->new_rate = core->rate;
		return NULL;
	} else {
		/* pass-through clock with adjustable parent */
		top = clk_calc_new_rates(parent, rate);
		new_rate = parent->new_rate;
		goto out;
	}

	/* some clocks must be gated to change parent */
	if (parent != old_parent &&
	    (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
		pr_debug("%s: %s not gated but wants to reparent\n",
			 __func__, core->name);
		return NULL;
	}

	/* try finding the new parent index */
	if (parent && core->num_parents > 1) {
		p_index = clk_fetch_parent_index(core, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, core->name);
			return NULL;
		}
	}

	if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
	    best_parent_rate != parent->rate)
		top = clk_calc_new_rates(parent, best_parent_rate);

out:
	clk_calc_subtree(core, new_rate, parent, p_index);

	return top;
}

/*
 * Notify about rate changes in a subtree.  Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
						  unsigned long event)
{
	struct clk_core *child, *tmp_clk, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (core->rate == core->new_rate)
		return NULL;

	if (core->notifier_count) {
		ret = __clk_notify(core, event, core->rate, core->new_rate);
		if (ret & NOTIFY_STOP_MASK)
			fail_clk = core;
	}

	hlist_for_each_entry(child, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
			continue;
		tmp_clk = clk_propagate_rate_change(child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	/* handle the new child who might not be in core->children yet */
	if (core->new_child) {
		tmp_clk = clk_propagate_rate_change(core->new_child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	return fail_clk;
}

/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk_core *core)
{
	struct clk_core *child;
	struct hlist_node *tmp;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;
	bool skip_set_rate = false;
	struct clk_core *old_parent;
	struct clk_core *parent = NULL;

	old_rate = core->rate;

	if (core->new_parent) {
		parent = core->new_parent;
		best_parent_rate = core->new_parent->rate;
	} else if (core->parent) {
		parent = core->parent;
		best_parent_rate = core->parent->rate;
	}

	if (core->flags & CLK_SET_RATE_UNGATE) {
		unsigned long flags;

		clk_core_prepare(core);
		flags = clk_enable_lock();
		clk_core_enable(core);
		clk_enable_unlock(flags);
	}

	if (core->new_parent && core->new_parent != core->parent) {
		old_parent = __clk_set_parent_before(core, core->new_parent);
		trace_clk_set_parent(core, core->new_parent);

		if (core->ops->set_rate_and_parent) {
			skip_set_rate = true;
			core->ops->set_rate_and_parent(core->hw, core->new_rate,
					best_parent_rate,
					core->new_parent_index);
		} else if (core->ops->set_parent) {
			core->ops->set_parent(core->hw, core->new_parent_index);
		}

		trace_clk_set_parent_complete(core, core->new_parent);
		__clk_set_parent_after(core, core->new_parent, old_parent);
	}

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(parent);

	trace_clk_set_rate(core, core->new_rate);

	if (!skip_set_rate && core->ops->set_rate)
		core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);

	trace_clk_set_rate_complete(core, core->new_rate);

	core->rate = clk_recalc(core, best_parent_rate);

	if (core->flags & CLK_SET_RATE_UNGATE) {
		unsigned long flags;

		flags = clk_enable_lock();
		clk_core_disable(core);
		clk_enable_unlock(flags);
		clk_core_unprepare(core);
	}

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(parent);

	if (core->notifier_count && old_rate != core->rate)
		__clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);

	if (core->flags & CLK_RECALC_NEW_RATES)
		(void)clk_calc_new_rates(core, core->new_rate);

	/*
	 * Use safe iteration, as change_rate can actually swap parents
	 * for certain clock types.
	 */
	hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
			continue;
		clk_change_rate(child);
	}

	/* handle the new child who might not be in core->children yet */
	if (core->new_child)
		clk_change_rate(core->new_child);
}

static int clk_core_set_rate_nolock(struct clk_core *core,
				    unsigned long req_rate)
{
	struct clk_core *top, *fail_clk;
	unsigned long rate = req_rate;

	if (!core)
		return 0;

	/* bail early if nothing to do */
	if (rate == clk_core_get_rate_nolock(core))
		return 0;

	if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count)
		return -EBUSY;

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(core, rate);
	if (!top)
		return -EINVAL;

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_debug("%s: failed to set %s rate\n", __func__,
				fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		return -EBUSY;
	}

	/* change the rates */
	clk_change_rate(top);

	core->req_rate = req_rate;

	return 0;
}
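
/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate.  Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates
 * the rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, -EERROR otherwise.
 */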
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	ret = clk_core_set_rate_nolock(clk->core, rate);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
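
/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */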
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (min > max) {
		pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
		       __func__, clk->core->name, clk->dev_id, clk->con_id,
		       min, max);
		return -EINVAL;
	}

	clk_prepare_lock();

	if (min != clk->min_rate || max != clk->max_rate) {
		clk->min_rate = min;
		clk->max_rate = max;
		ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_range);

/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	if (!clk)
		return 0;

	return clk_set_rate_range(clk, rate, clk->max_rate);
}
EXPORT_SYMBOL_GPL(clk_set_min_rate);

/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	if (!clk)
		return 0;

	return clk_set_rate_range(clk, clk->min_rate, rate);
}
EXPORT_SYMBOL_GPL(clk_set_max_rate);

/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent.  Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
	struct clk *parent;

	if (!clk)
		return NULL;

	clk_prepare_lock();
	/* TODO: Create a per-user clk and change callers to call clk_put */
	parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
	clk_prepare_unlock();

	return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

static struct clk_core *__clk_init_parent(struct clk_core *core)
{
	u8 index = 0;

	if (core->num_parents > 1 && core->ops->get_parent)
		index = core->ops->get_parent(core->hw);

	return clk_core_get_parent_by_index(core, index);
}

static void clk_core_reparent(struct clk_core *core,
			      struct clk_core *new_parent)
{
	clk_reparent(core, new_parent);
	__clk_recalc_accuracies(core);
	__clk_recalc_rates(core, POST_RATE_CHANGE);
}

void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
{
	if (!hw)
		return;

	clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
}

/**
 * clk_has_parent - check if a clock is a possible parent for another
 * @clk: clock source
 * @parent: parent clock source
 *
 * This function can be used in drivers that need to check that a clock can
 * be the parent of another without actually changing the parent.
 *
 * Returns true if @parent is a possible parent for @clk, false otherwise.
 */
bool clk_has_parent(struct clk *clk, struct clk *parent)
{
	struct clk_core *core, *parent_core;
	unsigned int i;

	/* NULL clocks should be nops, so return success if either is NULL */
	if (!clk || !parent)
		return true;

	core = clk->core;
	parent_core = parent->core;

	/* Optimize for the case where the parent is already the parent */
	if (core->parent == parent_core)
		return true;

	for (i = 0; i < core->num_parents; i++)
		if (strcmp(core->parent_names[i], parent_core->name) == 0)
			return true;

	return false;
}
EXPORT_SYMBOL_GPL(clk_has_parent);

static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
{
	int ret = 0;
	int p_index = 0;
	unsigned long p_rate = 0;

	if (!core)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	if (core->parent == parent)
		goto out;

	/* verify ops for multi-parent clks */
	if ((core->num_parents > 1) && (!core->ops->set_parent)) {
		ret = -ENOSYS;
		goto out;
	}

	/* check that we are allowed to re-parent if the clock is in use */
	if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
		ret = -EBUSY;
		goto out;
	}

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(core, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
					__func__, parent->name, core->name);
			ret = p_index;
			goto out;
		}
		p_rate = parent->rate;
	}

	/* propagate PRE_RATE_CHANGE notifications */
	ret = __clk_speculate_rates(core, p_rate);

	/* abort if a driver objects */
	if (ret & NOTIFY_STOP_MASK)
		goto out;

	/* do the re-parent */
	ret = __clk_set_parent(core, parent, p_index);

	/* propagate rate and accuracy recalculation accordingly */
	if (ret) {
		__clk_recalc_rates(core, ABORT_RATE_CHANGE);
	} else {
		__clk_recalc_rates(core, POST_RATE_CHANGE);
		__clk_recalc_accuracies(core);
	}

out:
	clk_prepare_unlock();

	return ret;
}
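
/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source.  If clk is in
 * prepared state, the clk will get enabled for the duration of this call.
 * If that's not acceptable for a specific clk (Eg: the consumer can't
 * handle the transition) then make sure to use the CLK_SET_PARENT_GATE
 * flag to allow re-parenting only when clk is unprepared.
 *
 * After successfully changing clk's parent clk_set_parent will update the
 * clk topology and propagate rate recalculation via __clk_recalc_rates.
 *
 * Returns 0 on success, -EERROR otherwise.
 */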
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	if (!clk)
		return 0;

	return clk_core_set_parent(clk->core, parent ? parent->core : NULL);
}
EXPORT_SYMBOL_GPL(clk_set_parent);
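
/**
 * clk_set_phase - adjust the phase shift of a clock signal
 * @clk: clock signal source
 * @degrees: number of degrees the signal is shifted
 *
 * Shifts the phase of a clock signal by the specified degrees.  Returns 0
 * on success, -EERROR otherwise.
 *
 * This function makes no distinction about the input or reference signal
 * that we adjust the clock signal phase against.  For example phase could
 * be adjusted against the clock signal that is its parent or the clock
 * that feeds the reference input of a phase-locked loop.
 *
 * If the return value is 0 then the phase shift was successful; otherwise
 * the operation failed and the phase remained unchanged.
 */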
int clk_set_phase(struct clk *clk, int degrees)
{
	int ret = -EINVAL;

	if (!clk)
		return 0;

	/* sanity check degrees */
	degrees %= 360;
	if (degrees < 0)
		degrees += 360;

	clk_prepare_lock();

	trace_clk_set_phase(clk->core, degrees);

	if (clk->core->ops->set_phase)
		ret = clk->core->ops->set_phase(clk->core->hw, degrees);

	trace_clk_set_phase_complete(clk->core, degrees);

	if (!ret)
		clk->core->phase = degrees;

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_phase);

static int clk_core_get_phase(struct clk_core *core)
{
	int ret;

	clk_prepare_lock();
	ret = core->phase;
	clk_prepare_unlock();

	return ret;
}

/**
 * clk_get_phase - return the phase shift of a clock signal
 * @clk: clock signal source
 *
 * Returns the phase shift of a clock node in degrees.  Returns 0 if clk
 * is NULL.
 */
int clk_get_phase(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_phase(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_phase);
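
/**
 * clk_is_match - check if two clk's point to the same hardware clock
 * @p: clk compared against q
 * @q: clk compared against p
 *
 * Returns true if the two struct clk pointers both point to the same
 * hardware clock node, i.e. if struct clk *p and struct clk *q share the
 * same struct clk_core object.
 *
 * Returns false otherwise.  Note that two NULL clks are treated as
 * matching.
 */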
bool clk_is_match(const struct clk *p, const struct clk *q)
{
	/* trivial case: identical struct clk's or both NULL */
	if (p == q)
		return true;

	/* true if clk->core pointers match.  Avoid dereferencing garbage */
	if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
		if (p->core == q->core)
			return true;

	return false;
}
EXPORT_SYMBOL_GPL(clk_is_match);

/***        debugfs support        ***/

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *rootdir;
static int inited = 0;
static DEFINE_MUTEX(clk_debug_lock);
static HLIST_HEAD(clk_debug_list);

static struct hlist_head *all_lists[] = {
	&clk_root_list,
	&clk_orphan_list,
	NULL,
};

static struct hlist_head *orphan_list[] = {
	&clk_orphan_list,
	NULL,
};

static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
				 int level)
{
	if (!c)
		return;

	seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
		   level * 3 + 1, "",
		   30 - level * 3, c->name,
		   c->enable_count, c->prepare_count, clk_core_get_rate(c),
		   clk_core_get_accuracy(c), clk_core_get_phase(c));
}

static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
				     int level)
{
	struct clk_core *child;

	if (!c)
		return;

	clk_summary_show_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node)
		clk_summary_show_subtree(s, child, level + 1);
}

static int clk_summary_show(struct seq_file *s, void *data)
{
	struct clk_core *c;
	struct hlist_head **lists = (struct hlist_head **)s->private;

	seq_puts(s, "   clock                         enable_cnt  prepare_cnt        rate   accuracy   phase\n");
	seq_puts(s, "----------------------------------------------------------------------------------------\n");

	clk_prepare_lock();

	for (; *lists; lists++)
		hlist_for_each_entry(c, *lists, child_node)
			clk_summary_show_subtree(s, c, 0);

	clk_prepare_unlock();

	return 0;
}

static int clk_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_summary_show, inode->i_private);
}

static const struct file_operations clk_summary_fops = {
	.open = clk_summary_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
{
	if (!c)
		return;

	/* This should be JSON format, i.e. elements separated with a comma */
	seq_printf(s, "\"%s\": { ", c->name);
	seq_printf(s, "\"enable_count\": %d,", c->enable_count);
	seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
	seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
	seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
	seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
}

static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
{
	struct clk_core *child;

	if (!c)
		return;

	clk_dump_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node) {
		seq_putc(s, ',');
		clk_dump_subtree(s, child, level + 1);
	}

	seq_putc(s, '}');
}

static int clk_dump(struct seq_file *s, void *data)
{
	struct clk_core *c;
	bool first_node = true;
	struct hlist_head **lists = (struct hlist_head **)s->private;

	seq_putc(s, '{');
	clk_prepare_lock();

	for (; *lists; lists++) {
		hlist_for_each_entry(c, *lists, child_node) {
			if (!first_node)
				seq_putc(s, ',');
			first_node = false;
			clk_dump_subtree(s, c, 0);
		}
	}

	clk_prepare_unlock();

	seq_puts(s, "}\n");
	return 0;
}

static int clk_dump_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_dump, inode->i_private);
}

static const struct file_operations clk_dump_fops = {
	.open = clk_dump_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int possible_parents_dump(struct seq_file *s, void *data)
{
	struct clk_core *core = s->private;
	int i;

	for (i = 0; i < core->num_parents - 1; i++)
		seq_printf(s, "%s ", core->parent_names[i]);

	seq_printf(s, "%s\n", core->parent_names[i]);

	return 0;
}

static int possible_parents_open(struct inode *inode, struct file *file)
{
	return single_open(file, possible_parents_dump, inode->i_private);
}

static const struct file_operations possible_parents_fops = {
	.open = possible_parents_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
{
	struct dentry *d;
	int ret = -ENOMEM;

	if (!core || !pdentry) {
		ret = -EINVAL;
		goto out;
	}

	d = debugfs_create_dir(core->name, pdentry);
	if (!d)
		goto out;

	core->dentry = d;

	d = debugfs_create_u32("clk_rate", S_IRUGO, core->dentry,
			       (u32 *)&core->rate);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_accuracy", S_IRUGO, core->dentry,
			       (u32 *)&core->accuracy);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_phase", S_IRUGO, core->dentry,
			       (u32 *)&core->phase);
	if (!d)
		goto err_out;

	d = debugfs_create_x32("clk_flags", S_IRUGO, core->dentry,
			       (u32 *)&core->flags);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_prepare_count", S_IRUGO, core->dentry,
			       (u32 *)&core->prepare_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_enable_count", S_IRUGO, core->dentry,
			       (u32 *)&core->enable_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_notifier_count", S_IRUGO, core->dentry,
			       (u32 *)&core->notifier_count);
	if (!d)
		goto err_out;

	if (core->num_parents > 1) {
		d = debugfs_create_file("clk_possible_parents", S_IRUGO,
				core->dentry, core, &possible_parents_fops);
		if (!d)
			goto err_out;
	}

	if (core->ops->debug_init) {
		ret = core->ops->debug_init(core->hw, core->dentry);
		if (ret)
			goto err_out;
	}

	ret = 0;
	goto out;

err_out:
	debugfs_remove_recursive(core->dentry);
	core->dentry = NULL;
out:
	return ret;
}

/**
 * clk_debug_register - add a clk node to the debugfs clk directory
 * @core: the clk being added to the debugfs clk directory
 *
 * Dynamically adds a clk to the debugfs clk directory if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk
 * directory will be created lazily by clk_debug_init as part of a
 * late_initcall.
 */
static int clk_debug_register(struct clk_core *core)
{
	int ret = 0;

	mutex_lock(&clk_debug_lock);
	hlist_add_head(&core->debug_node, &clk_debug_list);

	if (!inited)
		goto unlock;

	ret = clk_debug_create_one(core, rootdir);
unlock:
	mutex_unlock(&clk_debug_lock);

	return ret;
}

/**
 * clk_debug_unregister - remove a clk node from the debugfs clk directory
 * @core: the clk being removed from the debugfs clk directory
 *
 * Dynamically removes a clk and all of its child nodes from the
 * debugfs clk directory if clk->dentry points to debugfs created by
 * clk_debug_register in __clk_core_init.
 */
static void clk_debug_unregister(struct clk_core *core)
{
	mutex_lock(&clk_debug_lock);
	hlist_del_init(&core->debug_node);
	debugfs_remove_recursive(core->dentry);
	core->dentry = NULL;
	mutex_unlock(&clk_debug_lock);
}

struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
				void *data, const struct file_operations *fops)
{
	struct dentry *d = NULL;

	if (hw->core->dentry)
		d = debugfs_create_file(name, mode, hw->core->dentry, data,
					fops);

	return d;
}
EXPORT_SYMBOL_GPL(clk_debugfs_add_file);

/**
 * clk_debug_init - lazily populate the debugfs clk directory
 *
 * clks are often initialized very early during boot before memory can be
 * dynamically allocated and well before debugfs is setup.  This function
 * populates the debugfs clk directory once at boot-time when we know that
 * debugfs is setup.  It should only be called once at boot-time; all clks
 * registered beyond that point will be created lazily by clk_debug_register.
 */
static int __init clk_debug_init(void)
{
	struct clk_core *core;
	struct dentry *d;

	rootdir = debugfs_create_dir("clk", NULL);

	if (!rootdir)
		return -ENOMEM;

	d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists,
				&clk_summary_fops);
	if (!d)
		return -ENOMEM;

	d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists,
				&clk_dump_fops);
	if (!d)
		return -ENOMEM;

	d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir,
				&orphan_list, &clk_summary_fops);
	if (!d)
		return -ENOMEM;

	d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir,
				&orphan_list, &clk_dump_fops);
	if (!d)
		return -ENOMEM;

	mutex_lock(&clk_debug_lock);
	hlist_for_each_entry(core, &clk_debug_list, debug_node)
		clk_debug_create_one(core, rootdir);

	inited = 1;
	mutex_unlock(&clk_debug_lock);

	return 0;
}
late_initcall(clk_debug_init);
#else
static inline int clk_debug_register(struct clk_core *core) { return 0; }
static inline void clk_debug_reparent(struct clk_core *core,
				      struct clk_core *new_parent)
{
}
static inline void clk_debug_unregister(struct clk_core *core)
{
}
#endif

/**
 * __clk_core_init - initialize the data structures in a struct clk_core
 * @core: clk_core being initialized
 *
 * Initializes the lists in struct clk_core, queries the hardware for the
 * parent and rate and sets them both.
 */
static int __clk_core_init(struct clk_core *core)
{
	int i, ret = 0;
	struct clk_core *orphan;
	struct hlist_node *tmp2;
	unsigned long rate;

	if (!core)
		return -EINVAL;

	clk_prepare_lock();

	/* check to see if a clock with this name is already registered */
	if (clk_core_lookup(core->name)) {
		pr_debug("%s: clk %s already initialized\n",
				__func__, core->name);
		ret = -EEXIST;
		goto out;
	}

	/* check that clk_ops are sane.  See Documentation/clk.txt */
	if (core->ops->set_rate &&
	    !((core->ops->round_rate || core->ops->determine_rate) &&
	      core->ops->recalc_rate)) {
		pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
		       __func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	if (core->ops->set_parent && !core->ops->get_parent) {
		pr_err("%s: %s must implement .get_parent & .set_parent\n",
		       __func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	if (core->num_parents > 1 && !core->ops->get_parent) {
		pr_err("%s: %s must implement .get_parent as it has multi parents\n",
		       __func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	if (core->ops->set_rate_and_parent &&
	    !(core->ops->set_parent && core->ops->set_rate)) {
		pr_err("%s: %s must implement .set_parent & .set_rate\n",
		       __func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	/* throw a WARN if any entries in parent_names are NULL */
	for (i = 0; i < core->num_parents; i++)
		WARN(!core->parent_names[i],
				"%s: invalid NULL in %s's .parent_names\n",
				__func__, core->name);

	core->parent = __clk_init_parent(core);

	/*
	 * Populate core->parent if parent has already been clk_core_init'd.
	 * If parent has not yet been clk_core_init'd then place clk in the
	 * orphan list.  If clk doesn't have any parents then place it in
	 * the root clk list.
	 *
	 * Every time a new clk is clk_init'd then we walk the list of orphan
	 * clocks and re-parent any that are children of the clock currently
	 * being clk_init'd.
	 */
	if (core->parent) {
		hlist_add_head(&core->child_node,
				&core->parent->children);
		core->orphan = core->parent->orphan;
	} else if (!core->num_parents) {
		hlist_add_head(&core->child_node, &clk_root_list);
		core->orphan = false;
	} else {
		hlist_add_head(&core->child_node, &clk_orphan_list);
		core->orphan = true;
	}

	/*
	 * Set clk's accuracy.  The preferred method is to use
	 * .recalc_accuracy.  For simple clocks and lazy developers the
	 * default fallback is to use the parent's accuracy.  If a clock
	 * doesn't have a parent (or is orphaned) then accuracy is set to
	 * zero (perfect clock).
	 */
	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
					__clk_get_accuracy(core->parent));
	else if (core->parent)
		core->accuracy = core->parent->accuracy;
	else
		core->accuracy = 0;

	/*
	 * Set clk's phase.
	 * Since a phase is by definition relative to its parent, just
	 * query the current clock phase, or just assume it's in phase.
	 */
	if (core->ops->get_phase)
		core->phase = core->ops->get_phase(core->hw);
	else
		core->phase = 0;

	/*
	 * Set clk's rate.  The preferred method is to use .recalc_rate.
	 * For simple clocks and lazy developers the default fallback is to
	 * use the parent's rate.  If a clock doesn't have a parent (or is
	 * orphaned) then rate is set to zero.
	 */
	if (core->ops->recalc_rate)
		rate = core->ops->recalc_rate(core->hw,
				clk_core_get_rate_nolock(core->parent));
	else if (core->parent)
		rate = core->parent->rate;
	else
		rate = 0;
	core->rate = core->req_rate = rate;

	/*
	 * walk the list of orphan clocks and reparent any that newly finds
	 * a parent.
	 */
	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
		struct clk_core *parent = __clk_init_parent(orphan);

		/*
		 * Use __clk_set_parent_before() and _after() to properly
		 * migrate any prepare/enable count of the orphan clock to
		 * its new parent.
		 */
		if (parent) {
			__clk_set_parent_before(orphan, parent);
			__clk_set_parent_after(orphan, parent, NULL);
			__clk_recalc_accuracies(orphan);
			__clk_recalc_rates(orphan, 0);
		}
	}

	/*
	 * optional platform-specific magic
	 *
	 * The .init callback is not used by any of the basic clock types,
	 * but exists for weird hardware that must perform initialization
	 * magic.  Please consider other ways of solving initialization
	 * problems before using this callback, as its use is discouraged.
	 */
	if (core->ops->init)
		core->ops->init(core->hw);

	if (core->flags & CLK_IS_CRITICAL) {
		unsigned long flags;

		clk_core_prepare(core);

		flags = clk_enable_lock();
		clk_core_enable(core);
		clk_enable_unlock(flags);
	}

	kref_init(&core->ref);
out:
	clk_prepare_unlock();

	if (!ret)
		clk_debug_register(core);

	return ret;
}

struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
			     const char *con_id)
{
	struct clk *clk;

	/* This is to allow this function to be chained to others */
	if (IS_ERR_OR_NULL(hw))
		return ERR_CAST(hw);

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	clk->core = hw->core;
	clk->dev_id = dev_id;
	clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
	clk->max_rate = ULONG_MAX;

	clk_prepare_lock();
	hlist_add_head(&clk->clks_node, &hw->core->clks);
	clk_prepare_unlock();

	return clk;
}

void __clk_free_clk(struct clk *clk)
{
	clk_prepare_lock();
	hlist_del(&clk->clks_node);
	clk_prepare_unlock();

	kfree_const(clk->con_id);
	kfree(clk);
}
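
/**
 * clk_register - allocate a new clock, register it and return an opaque
 * cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the primary interface for populating the clock tree with
 * new clock nodes.  It returns a pointer to the newly allocated struct clk
 * which cannot be dereferenced by driver code but may be used in
 * conjunction with the rest of the clock API.  In the event of an error
 * clk_register will return an error code; drivers must test for an error
 * code after calling clk_register.
 */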
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
	int i, ret;
	struct clk_core *core;

	core = kzalloc(sizeof(*core), GFP_KERNEL);
	if (!core) {
		ret = -ENOMEM;
		goto fail_out;
	}

	core->name = kstrdup_const(hw->init->name, GFP_KERNEL);
	if (!core->name) {
		ret = -ENOMEM;
		goto fail_name;
	}
	core->ops = hw->init->ops;
	if (dev && dev->driver)
		core->owner = dev->driver->owner;
	core->hw = hw;
	core->flags = hw->init->flags;
	core->num_parents = hw->init->num_parents;
	core->min_rate = 0;
	core->max_rate = ULONG_MAX;
	hw->core = core;

	/* allocate local copy in case parent_names is __initdata */
	core->parent_names = kcalloc(core->num_parents, sizeof(char *),
					GFP_KERNEL);

	if (!core->parent_names) {
		ret = -ENOMEM;
		goto fail_parent_names;
	}

	/* copy each string name in case parent_names is __initdata */
	for (i = 0; i < core->num_parents; i++) {
		core->parent_names[i] = kstrdup_const(hw->init->parent_names[i],
						GFP_KERNEL);
		if (!core->parent_names[i]) {
			ret = -ENOMEM;
			goto fail_parent_names_copy;
		}
	}

	/* avoid unnecessary string look-ups of clk_core's possible parents */
	core->parents = kcalloc(core->num_parents, sizeof(*core->parents),
				GFP_KERNEL);
	if (!core->parents) {
		ret = -ENOMEM;
		goto fail_parents;
	}

	INIT_HLIST_HEAD(&core->clks);

	hw->clk = __clk_create_clk(hw, NULL, NULL);
	if (IS_ERR(hw->clk)) {
		ret = PTR_ERR(hw->clk);
		goto fail_parents;
	}

	ret = __clk_core_init(core);
	if (!ret)
		return hw->clk;

	__clk_free_clk(hw->clk);
	hw->clk = NULL;

fail_parents:
	kfree(core->parents);
fail_parent_names_copy:
	while (--i >= 0)
		kfree_const(core->parent_names[i]);
	kfree(core->parent_names);
fail_parent_names:
	kfree_const(core->name);
fail_name:
	kfree(core);
fail_out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);

/**
 * clk_hw_register - register a clk_hw and return an error code
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_hw_register is the primary interface for populating the clock tree
 * with new clock nodes.  It returns an integer equal to zero indicating
 * success or less than zero indicating failure.  Drivers must test for an
 * error code after calling clk_hw_register.
 */
int clk_hw_register(struct device *dev, struct clk_hw *hw)
{
	return PTR_ERR_OR_ZERO(clk_register(dev, hw));
}
EXPORT_SYMBOL_GPL(clk_hw_register);

/* Free memory allocated for a clock. */
static void __clk_release(struct kref *ref)
{
	struct clk_core *core = container_of(ref, struct clk_core, ref);
	int i = core->num_parents;

	lockdep_assert_held(&prepare_lock);

	kfree(core->parents);
	while (--i >= 0)
		kfree_const(core->parent_names[i]);

	kfree(core->parent_names);
	kfree_const(core->name);
	kfree(core);
}

/*
 * Empty clk_ops for unregistered clocks.  These are used temporarily
 * after clk_unregister() was called on a clock and until the last clock
 * consumer calls clk_put() and the struct clk object is freed.
 */
static int clk_nodrv_prepare_enable(struct clk_hw *hw)
{
	return -ENXIO;
}

static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
{
	WARN_ON_ONCE(1);
}

static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	return -ENXIO;
}

static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
{
	return -ENXIO;
}

static const struct clk_ops clk_nodrv_ops = {
	.enable = clk_nodrv_prepare_enable,
	.disable = clk_nodrv_disable_unprepare,
	.prepare = clk_nodrv_prepare_enable,
	.unprepare = clk_nodrv_disable_unprepare,
	.set_rate = clk_nodrv_set_rate,
	.set_parent = clk_nodrv_set_parent,
};
2748
2749
2750
2751
2752
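/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 */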
void clk_unregister(struct clk *clk)
{
	unsigned long flags;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_debug_unregister(clk->core);

	clk_prepare_lock();

	if (clk->core->ops == &clk_nodrv_ops) {
		pr_err("%s: unregistered clock: %s\n", __func__,
		       clk->core->name);
		goto unlock;
	}

	/*
	 * Assign empty clock ops for consumers that might still hold
	 * a reference to this clock.
	 */
	flags = clk_enable_lock();
	clk->core->ops = &clk_nodrv_ops;
	clk_enable_unlock(flags);

	if (!hlist_empty(&clk->core->children)) {
		struct clk_core *child;
		struct hlist_node *t;

		/* Reparent all children to the orphan list. */
		hlist_for_each_entry_safe(child, t, &clk->core->children,
					  child_node)
			clk_core_set_parent(child, NULL);
	}

	hlist_del_init(&clk->core->child_node);

	if (clk->core->prepare_count)
		pr_warn("%s: unregistering prepared clock: %s\n",
			__func__, clk->core->name);
	kref_put(&clk->core->ref, __clk_release);
unlock:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);

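/**
 * clk_hw_unregister - unregister a currently registered clk_hw
 * @hw: hardware-specific clock data to unregister
 */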
void clk_hw_unregister(struct clk_hw *hw)
{
	clk_unregister(hw->clk);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister);

static void devm_clk_release(struct device *dev, void *res)
{
	clk_unregister(*(struct clk **)res);
}

static void devm_clk_hw_release(struct device *dev, void *res)
{
	clk_hw_unregister(*(struct clk_hw **)res);
}

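/**
 * devm_clk_register - resource managed clk_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_register().  Clocks returned from this function are
 * automatically clk_unregister()ed on driver detach.  See clk_register() for
 * more information.
 */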
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
	struct clk *clk;
	struct clk **clkp;

	clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
	if (!clkp)
		return ERR_PTR(-ENOMEM);

	clk = clk_register(dev, hw);
	if (!IS_ERR(clk)) {
		*clkp = clk;
		devres_add(dev, clkp);
	} else {
		devres_free(clkp);
	}

	return clk;
}
EXPORT_SYMBOL_GPL(devm_clk_register);

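/**
 * devm_clk_hw_register - resource managed clk_hw_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_hw_register().  Clocks registered by this function are
 * automatically clk_hw_unregister()ed on driver detach.  See clk_hw_register()
 * for more information.
 *
 * A minimal usage sketch (illustrative only; "my_clk_ops" and the init data
 * below are assumptions of a hypothetical provider, not part of this file):
 *
 *	static const struct clk_init_data init = {
 *		.name = "my_clk",
 *		.ops = &my_clk_ops,
 *	};
 *
 *	my_hw.init = &init;
 *	ret = devm_clk_hw_register(dev, &my_hw);
 */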
int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
{
	struct clk_hw **hwp;
	int ret;

	hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL);
	if (!hwp)
		return -ENOMEM;

	ret = clk_hw_register(dev, hw);
	if (!ret) {
		*hwp = hw;
		devres_add(dev, hwp);
	} else {
		devres_free(hwp);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_clk_hw_register);

static int devm_clk_match(struct device *dev, void *res, void *data)
{
	struct clk *c = res;

	if (WARN_ON(!c))
		return 0;
	return c == data;
}

static int devm_clk_hw_match(struct device *dev, void *res, void *data)
{
	struct clk_hw *hw = res;

	if (WARN_ON(!hw))
		return 0;
	return hw == data;
}

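/**
 * devm_clk_unregister - resource managed clk_unregister()
 * @dev: device this clock belongs to
 * @clk: clock to unregister
 *
 * Deallocate a clock allocated with devm_clk_register().  Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */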
void devm_clk_unregister(struct device *dev, struct clk *clk)
{
	WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);

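/**
 * devm_clk_hw_unregister - resource managed clk_hw_unregister()
 * @dev: device that is unregistering the hardware-specific clock data
 * @hw: link to hardware-specific clock data
 *
 * Unregister a clk_hw registered with devm_clk_hw_register().  Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */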
void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
{
	WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match,
			       hw));
}
EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);

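/*
 * clkdev helpers
 */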
int __clk_get(struct clk *clk)
{
	struct clk_core *core = !clk ? NULL : clk->core;

	if (core) {
		if (!try_module_get(core->owner))
			return 0;

		kref_get(&core->ref);
	}
	return 1;
}

void __clk_put(struct clk *clk)
{
	struct module *owner;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_prepare_lock();

	hlist_del(&clk->clks_node);
	if (clk->min_rate > clk->core->req_rate ||
	    clk->max_rate < clk->core->req_rate)
		clk_core_set_rate_nolock(clk->core, clk->core->req_rate);

	owner = clk->core->owner;
	kref_put(&clk->core->ref, __clk_release);

	clk_prepare_unlock();

	module_put(owner);

	kfree(clk);
}

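/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes.  This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon.  The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * this will cause a nested prepare_lock mutex.
 *
 * In all notification cases (pre, post and abort rate change) the original
 * clock rate is passed to the callback via struct clk_notifier_data.old_rate
 * and the new frequency is passed via struct clk_notifier_data.new_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 *
 * A sketch of the expected callback shape (the callback name is an
 * assumption, not part of this file):
 *
 *	static int my_rate_cb(struct notifier_block *nb, unsigned long event,
 *			      void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == PRE_RATE_CHANGE)
 *			pr_debug("rate: %lu -> %lu\n", ndata->old_rate,
 *				 ndata->new_rate);
 *		return NOTIFY_OK;
 *	}
 */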
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOMEM;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	/* search the list of notifiers for this clk */
	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	/* if clk wasn't in the notifier list, allocate new clk_notifier */
	if (cn->clk != clk) {
		cn = kzalloc(sizeof(*cn), GFP_KERNEL);
		if (!cn)
			goto out;

		cn->clk = clk;
		srcu_init_notifier_head(&cn->notifier_head);

		list_add(&cn->node, &clk_notifier_list);
	}

	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

	clk->core->notifier_count++;

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);

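/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Request no further notification for changes to 'clk' and frees memory
 * allocated in clk_notifier_register.
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */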
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn = NULL;
	int ret = -EINVAL;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	if (cn->clk == clk) {
		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

		clk->core->notifier_count--;

		/* if the notifier chain is now empty, free the clk_notifier */
		if (!cn->notifier_head.head) {
			srcu_cleanup_notifier_head(&cn->notifier_head);
			list_del(&cn->node);
			kfree(cn);
		}

	} else {
		ret = -ENOENT;
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);

#ifdef CONFIG_OF

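/**
 * struct of_clk_provider - Clock provider registration structure
 * @link: Entry in global list of clock providers
 * @node: Pointer to device tree node of clock provider
 * @get: Get clock callback.  Returns NULL or a struct clk for the
 *       given clock specifier
 * @get_hw: Get clk_hw callback.  Returns NULL, an ERR_PTR or a
 *          struct clk_hw for the given clock specifier
 * @data: context pointer to be passed into the @get or @get_hw callback
 */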
struct of_clk_provider {
	struct list_head link;

	struct device_node *node;
	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
	struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data);
	void *data;
};

static const struct of_device_id __clk_of_table_sentinel
	__used __section(__clk_of_table_end);

static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_mutex);

struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
				  void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);

struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);

struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_onecell_data *clk_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= clk_data->clk_num) {
		pr_err("%s: invalid clock index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return clk_data->clks[idx];
}
EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);

struct clk_hw *
of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_hw_onecell_data *hw_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= hw_data->num) {
		pr_err("%s: invalid index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return hw_data->hws[idx];
}
EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);

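/**
 * of_clk_add_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @clk_src_get: callback for decoding clock
 * @data: context pointer for @clk_src_get callback
 *
 * Typically paired with of_clk_src_simple_get() or of_clk_src_onecell_get()
 * above as the decode callback.
 */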
int of_clk_add_provider(struct device_node *np,
			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
						   void *data),
			void *data)
{
	struct of_clk_provider *cp;
	int ret;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->get = clk_src_get;

	mutex_lock(&of_clk_mutex);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_mutex);
	pr_debug("Added clock from %pOF\n", np);

	ret = of_clk_set_defaults(np, true);
	if (ret < 0)
		of_clk_del_provider(np);

	return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);

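/**
 * of_clk_add_hw_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @get: callback for decoding clk_hw
 * @data: context pointer for @get callback
 */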
int of_clk_add_hw_provider(struct device_node *np,
			   struct clk_hw *(*get)(struct of_phandle_args *clkspec,
						 void *data),
			   void *data)
{
	struct of_clk_provider *cp;
	int ret;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->get_hw = get;

	mutex_lock(&of_clk_mutex);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_mutex);
	pr_debug("Added clk_hw provider from %pOF\n", np);

	ret = of_clk_set_defaults(np, true);
	if (ret < 0)
		of_clk_del_provider(np);

	return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);

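/**
 * of_clk_del_provider() - Remove a previously registered clock provider
 * @np: Device node pointer associated with clock provider
 */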
void of_clk_del_provider(struct device_node *np)
{
	struct of_clk_provider *cp;

	mutex_lock(&of_clk_mutex);
	list_for_each_entry(cp, &of_clk_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_clk_mutex);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);

static struct clk_hw *
__of_clk_get_hw_from_provider(struct of_clk_provider *provider,
			      struct of_phandle_args *clkspec)
{
	struct clk *clk;

	if (provider->get_hw)
		return provider->get_hw(clkspec, provider->data);

	clk = provider->get(clkspec, provider->data);
	if (IS_ERR(clk))
		return ERR_CAST(clk);
	return __clk_get_hw(clk);
}

struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
				       const char *dev_id, const char *con_id)
{
	struct of_clk_provider *provider;
	struct clk *clk = ERR_PTR(-EPROBE_DEFER);
	struct clk_hw *hw;

	if (!clkspec)
		return ERR_PTR(-EINVAL);

	/* Check if we have such a provider in our array */
	mutex_lock(&of_clk_mutex);
	list_for_each_entry(provider, &of_clk_providers, link) {
		if (provider->node == clkspec->np) {
			hw = __of_clk_get_hw_from_provider(provider, clkspec);
			clk = __clk_create_clk(hw, dev_id, con_id);
		}

		if (!IS_ERR(clk)) {
			if (!__clk_get(clk)) {
				__clk_free_clk(clk);
				clk = ERR_PTR(-ENOENT);
			}

			break;
		}
	}
	mutex_unlock(&of_clk_mutex);

	return clk;
}

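/**
 * of_clk_get_from_provider() - Lookup a clock from a clock provider
 * @clkspec: pointer to a clock specifier data structure
 *
 * This function looks up a struct clk from the registered list of clock
 * providers; the input is a clock specifier data structure as returned
 * from the of_parse_phandle_with_args() function call.
 */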
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	return __of_clk_get_from_provider(clkspec, NULL, __func__);
}
EXPORT_SYMBOL_GPL(of_clk_get_from_provider);

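/**
 * of_clk_get_parent_count() - Count the number of clocks a device node has
 * @np: device node to count
 *
 * Returns: The number of clocks that are possible parents of this node
 */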
unsigned int of_clk_get_parent_count(struct device_node *np)
{
	int count;

	count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
	if (count < 0)
		return 0;

	return count;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_count);

const char *of_clk_get_parent_name(struct device_node *np, int index)
{
	struct of_phandle_args clkspec;
	struct property *prop;
	const char *clk_name;
	const __be32 *vp;
	u32 pv;
	int rc;
	int count;
	struct clk *clk;

	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
					&clkspec);
	if (rc)
		return NULL;

	index = clkspec.args_count ? clkspec.args[0] : 0;
	count = 0;

	/*
	 * if there is an indices property, use it to transfer the index
	 * specified into an array offset for the clock-output-names property.
	 */
	of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
		if (index == pv) {
			index = count;
			break;
		}
		count++;
	}
	/* We went off the end of 'clock-indices' without finding it */
	if (prop && !vp)
		return NULL;

	if (of_property_read_string_index(clkspec.np, "clock-output-names",
					  index,
					  &clk_name) < 0) {
		/*
		 * Best effort to get the name if the clock has been
		 * registered with the framework.  If the clock isn't
		 * registered, we return the node name as the name of
		 * the clock as long as #clock-cells = 0.
		 */
		clk = of_clk_get_from_provider(&clkspec);
		if (IS_ERR(clk)) {
			if (clkspec.args_count == 0)
				clk_name = clkspec.np->name;
			else
				clk_name = NULL;
		} else {
			clk_name = __clk_get_name(clk);
			clk_put(clk);
		}
	}

	of_node_put(clkspec.np);
	return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);

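/**
 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
 * number of parents
 * @np: Device node pointer associated with clock provider
 * @parents: pointer to char array that holds the parents' names
 * @size: size of the @parents array
 *
 * Return: number of parents for the clock node.
 */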
int of_clk_parent_fill(struct device_node *np, const char **parents,
		       unsigned int size)
{
	unsigned int i = 0;

	while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
		i++;

	return i;
}
EXPORT_SYMBOL_GPL(of_clk_parent_fill);

struct clock_provider {
	of_clk_init_cb_t clk_init_cb;
	struct device_node *np;
	struct list_head node;
};

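/*
 * This function looks for a parent clock.  If there is one, then it
 * checks that the provider for this parent clock was initialized, in
 * which case the parent clock is ready.
 */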
static int parent_ready(struct device_node *np)
{
	int i = 0;

	while (true) {
		struct clk *clk = of_clk_get(np, i);

		/* this parent is ready, check the next one */
		if (!IS_ERR(clk)) {
			clk_put(clk);
			i++;
			continue;
		}

		/* at least one parent is not ready, we exit now */
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return 0;

		/*
		 * Any other error means there are no more parents.  Since
		 * we did not exit above, all previous parents are ready,
		 * so this provider can be initialized.
		 */
		return 1;
	}
}

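/**
 * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
 * @np: Device node pointer associated with clock provider
 * @index: clock index
 * @flags: pointer to top-level framework flags
 *
 * Detects if the clock-critical property exists and, if so, sets the
 * corresponding CLK_IS_CRITICAL flag in @flags.
 *
 * Do not use this function.  It exists only for legacy Device Tree
 * bindings, such as the one-clock-per-node style that are outdated.
 * Those bindings typically put all clock data into .dts and the Linux
 * driver has no clock data, thus making it impossible to set this flag
 * correctly from the driver.  Only those drivers may call
 * of_clk_detect_critical from their setup functions.
 *
 * Return: error code or zero on success
 */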
int of_clk_detect_critical(struct device_node *np,
			   int index, unsigned long *flags)
{
	struct property *prop;
	const __be32 *cur;
	uint32_t idx;

	if (!np || !flags)
		return -EINVAL;

	of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
		if (index == idx)
			*flags |= CLK_IS_CRITICAL;

	return 0;
}

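/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers
 *
 * This function scans the device tree for matching clock providers
 * and calls their initialization functions, trying to follow the
 * dependencies between providers.
 */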
void __init of_clk_init(const struct of_device_id *matches)
{
	const struct of_device_id *match;
	struct device_node *np;
	struct clock_provider *clk_provider, *next;
	bool is_init_done;
	bool force = false;
	LIST_HEAD(clk_provider_list);

	if (!matches)
		matches = &__clk_of_table;

	/* First prepare the list of the clocks providers */
	for_each_matching_node_and_match(np, matches, &match) {
		struct clock_provider *parent;

		if (!of_device_is_available(np))
			continue;

		parent = kzalloc(sizeof(*parent), GFP_KERNEL);
		if (!parent) {
			list_for_each_entry_safe(clk_provider, next,
						 &clk_provider_list, node) {
				list_del(&clk_provider->node);
				of_node_put(clk_provider->np);
				kfree(clk_provider);
			}
			of_node_put(np);
			return;
		}

		parent->clk_init_cb = match->data;
		parent->np = of_node_get(np);
		list_add_tail(&parent->node, &clk_provider_list);
	}

	while (!list_empty(&clk_provider_list)) {
		is_init_done = false;
		list_for_each_entry_safe(clk_provider, next,
					 &clk_provider_list, node) {
			if (force || parent_ready(clk_provider->np)) {

				/* Don't populate platform devices */
				of_node_set_flag(clk_provider->np,
						 OF_POPULATED);

				clk_provider->clk_init_cb(clk_provider->np);
				of_clk_set_defaults(clk_provider->np, true);

				list_del(&clk_provider->node);
				of_node_put(clk_provider->np);
				kfree(clk_provider);
				is_init_done = true;
			}
		}

		/*
		 * We didn't manage to initialize any of the
		 * remaining providers during the last loop, so now we
		 * initialize all the remaining ones unconditionally
		 * in case the clock parent was not mandatory
		 */
		if (!is_init_done)
			force = true;
	}
}
#endif