// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * Standard functionality for the common clock API.  See
 * Documentation/driver-api/clk.rst
 */
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>

#include "clk.h"

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

static struct hlist_head *all_lists[] = {
	&clk_root_list,
	&clk_orphan_list,
	NULL,
};

/***    private data structures    ***/

struct clk_parent_map {
	const struct clk_hw *hw;
	struct clk_core *core;
	const char *fw_name;
	const char *name;
	int index;
};

struct clk_core {
	const char *name;
	const struct clk_ops *ops;
	struct clk_hw *hw;
	struct module *owner;
	struct device *dev;
	struct device_node *of_node;
	struct clk_core *parent;
	struct clk_parent_map *parents;
	u8 num_parents;
	u8 new_parent_index;
	unsigned long rate;
	unsigned long req_rate;
	unsigned long new_rate;
	struct clk_core *new_parent;
	struct clk_core *new_child;
	unsigned long flags;
	bool orphan;
	bool rpm_enabled;
	unsigned int enable_count;
	unsigned int prepare_count;
	unsigned int protect_count;
	unsigned long min_rate;
	unsigned long max_rate;
	unsigned long accuracy;
	int phase;
	struct clk_duty duty;
	struct hlist_head children;
	struct hlist_node child_node;
	struct hlist_head clks;
	unsigned int notifier_count;
#ifdef CONFIG_DEBUG_FS
	struct dentry *dentry;
	struct hlist_node debug_node;
#endif
	struct kref ref;
};

#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

struct clk {
	struct clk_core *core;
	struct device *dev;
	const char *dev_id;
	const char *con_id;
	unsigned long min_rate;
	unsigned long max_rate;
	unsigned int exclusive_count;
	struct hlist_node clks_node;
};

/***           runtime pm          ***/
static int clk_pm_runtime_get(struct clk_core *core)
{
	int ret;

	if (!core->rpm_enabled)
		return 0;

	ret = pm_runtime_get_sync(core->dev);
	return ret < 0 ? ret : 0;
}

static void clk_pm_runtime_put(struct clk_core *core)
{
	if (!core->rpm_enabled)
		return;

	pm_runtime_put_sync(core->dev);
}

/***           locking             ***/
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}

static unsigned long clk_enable_lock(void)
	__acquires(enable_lock)
{
	unsigned long flags;

	/*
	 * On UP systems, spin_trylock_irqsave() always returns true, even if
	 * we already hold the lock. As a result, we rely only on reference
	 * counting when CONFIG_SMP is not set.
	 */
	if (!IS_ENABLED(CONFIG_SMP) ||
	    !spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			__acquire(enable_lock);
			if (!IS_ENABLED(CONFIG_SMP))
				local_save_flags(flags);
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
	__releases(enable_lock)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt) {
		__release(enable_lock);
		return;
	}
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}
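
/*
 * Illustrative sketch, not part of the original file: both locks above are
 * reentrant per task (owner + refcount), which lets framework-internal code
 * and clk_ops callbacks nest calls that take the same lock. A hypothetical
 * internal helper (foo_ name is made up) could do:
 *
 *	static void foo_sync_rate(struct clk *clk, unsigned long rate)
 *	{
 *		clk_prepare_lock();		// takes prepare_lock, refcnt = 1
 *		clk_set_rate(clk, rate);	// nested lock: refcnt bumps to 2
 *		clk_prepare_unlock();		// refcnt drops to 1, then released
 *	}
 *
 * clk_set_rate() itself calls clk_prepare_lock(); the nesting only works
 * because prepare_owner/prepare_refcnt record the owning task.
 */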
static bool clk_core_rate_is_protected(struct clk_core *core)
{
	return core->protect_count;
}

static bool clk_core_is_prepared(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_prepared is optional for clocks that can prepare; fall back to
	 * the software usage counter if it is missing.
	 */
	if (!core->ops->is_prepared)
		return core->prepare_count;

	if (!clk_pm_runtime_get(core)) {
		ret = core->ops->is_prepared(core->hw);
		clk_pm_runtime_put(core);
	}

	return ret;
}

static bool clk_core_is_enabled(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_enabled is only mandatory for clocks that gate; fall back to
	 * the software usage counter if it is missing.
	 */
	if (!core->ops->is_enabled)
		return core->enable_count;

	/*
	 * Check that the clock controller's device is runtime active before
	 * calling the .is_enabled callback. If not, assume the clock is
	 * disabled, because we might be called from atomic context, from
	 * which pm_runtime_get() is not allowed. pm_runtime_get_noresume()
	 * only takes a reference without resuming the device.
	 */
	if (core->rpm_enabled) {
		pm_runtime_get_noresume(core->dev);
		if (!pm_runtime_active(core->dev)) {
			ret = false;
			goto done;
		}
	}

	ret = core->ops->is_enabled(core->hw);
done:
	if (core->rpm_enabled)
		pm_runtime_put(core->dev);

	return ret;
}

/***    helper functions   ***/

const char *__clk_get_name(const struct clk *clk)
{
	return !clk ? NULL : clk->core->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

const char *clk_hw_get_name(const struct clk_hw *hw)
{
	return hw->core->name;
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->core->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);

unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
{
	return hw->core->num_parents;
}
EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);

struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
{
	return hw->core->parent ? hw->core->parent->hw : NULL;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);

static struct clk_core *__clk_lookup_subtree(const char *name,
					     struct clk_core *core)
{
	struct clk_core *child;
	struct clk_core *ret;

	if (!strcmp(core->name, name))
		return core;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_lookup(const char *name)
{
	struct clk_core *root_clk;
	struct clk_core *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

#ifdef CONFIG_OF
static int of_parse_clkspec(const struct device_node *np, int index,
			    const char *name, struct of_phandle_args *out_args);
static struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
#else
static inline int of_parse_clkspec(const struct device_node *np, int index,
				   const char *name,
				   struct of_phandle_args *out_args)
{
	return -ENOENT;
}
static inline struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
{
	return ERR_PTR(-ENOENT);
}
#endif

/**
 * clk_core_get - Find the clk_core parent of a clk
 * @core: clk to find parent of
 * @p_index: parent index to search for
 *
 * This is the preferred method for clk providers to find the parent of a
 * clk when that parent is external to the clk controller. The parent name is
 * treated as a local name matching a string in the device node's
 * 'clock-names' property, or as the 'con_id' matching the device's dev_name()
 * in a clk_lookup. This allows clk providers to use their own namespace
 * instead of looking for a globally unique parent string.
 *
 * Returns an error pointer (e.g. -ENOENT) when the parent can't be found in
 * the DT node or via a clkdev lookup, or a valid clk_core pointer when the
 * clk can be found in the provider.
 */
static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
{
	const char *name = core->parents[p_index].fw_name;
	int index = core->parents[p_index].index;
	struct clk_hw *hw = ERR_PTR(-ENOENT);
	struct device *dev = core->dev;
	const char *dev_id = dev ? dev_name(dev) : NULL;
	struct device_node *np = core->of_node;
	struct of_phandle_args clkspec;

	if (np && (name || index >= 0) &&
	    !of_parse_clkspec(np, index, name, &clkspec)) {
		hw = of_clk_get_hw_from_clkspec(&clkspec);
		of_node_put(clkspec.np);
	} else if (name) {
		/*
		 * If the DT search above couldn't find the provider, fall
		 * back to looking up via clkdev based clk_lookups.
		 */
		hw = clk_find_hw(dev_id, name);
	}

	if (IS_ERR(hw))
		return ERR_CAST(hw);

	return hw->core;
}

static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
{
	struct clk_parent_map *entry = &core->parents[index];
	struct clk_core *parent = ERR_PTR(-ENOENT);

	if (entry->hw) {
		parent = entry->hw->core;
		/*
		 * We have a direct reference but it isn't registered yet?
		 * Orphan it and let clk_reparent() update the orphan status
		 * when the parent is registered.
		 */
		if (!parent)
			parent = ERR_PTR(-EPROBE_DEFER);
	} else {
		parent = clk_core_get(core, index);
		if (PTR_ERR(parent) == -ENOENT && entry->name)
			parent = clk_core_lookup(entry->name);
	}

	/* Only cache it if it's not an error */
	if (!IS_ERR(parent))
		entry->core = parent;
}

static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
						     u8 index)
{
	if (!core || index >= core->num_parents || !core->parents)
		return NULL;

	if (!core->parents[index].core)
		clk_core_fill_parent_index(core, index);

	return core->parents[index].core;
}

struct clk_hw *
clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
{
	struct clk_core *parent;

	parent = clk_core_get_parent_by_index(hw->core, index);

	return !parent ? NULL : parent->hw;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);

unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->core->enable_count;
}

static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
{
	if (!core)
		return 0;

	if (!core->num_parents || core->parent)
		return core->rate;

	/*
	 * Clk must have a parent to get a rate, but the parent isn't known
	 * yet: this is an orphan clk, so its cached rate is meaningless
	 * until the parent shows up. Report 0.
	 */
	return 0;
}

unsigned long clk_hw_get_rate(const struct clk_hw *hw)
{
	return clk_core_get_rate_nolock(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_rate);

static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core)
{
	if (!core)
		return 0;

	return core->accuracy;
}

unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? 0 : clk->core->flags;
}
EXPORT_SYMBOL_GPL(__clk_get_flags);

unsigned long clk_hw_get_flags(const struct clk_hw *hw)
{
	return hw->core->flags;
}
EXPORT_SYMBOL_GPL(clk_hw_get_flags);

bool clk_hw_is_prepared(const struct clk_hw *hw)
{
	return clk_core_is_prepared(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_is_prepared);

bool clk_hw_rate_is_protected(const struct clk_hw *hw)
{
	return clk_core_rate_is_protected(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);

bool clk_hw_is_enabled(const struct clk_hw *hw)
{
	return clk_core_is_enabled(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_is_enabled);

bool __clk_is_enabled(struct clk *clk)
{
	if (!clk)
		return false;

	return clk_core_is_enabled(clk->core);
}
EXPORT_SYMBOL_GPL(__clk_is_enabled);

static bool mux_is_better_rate(unsigned long rate, unsigned long now,
			       unsigned long best, unsigned long flags)
{
	if (flags & CLK_MUX_ROUND_CLOSEST)
		return abs(now - rate) < abs(best - rate);

	return now <= rate && now > best;
}

int clk_mux_determine_rate_flags(struct clk_hw *hw,
				 struct clk_rate_request *req,
				 unsigned long flags)
{
	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
	int i, num_parents, ret;
	unsigned long best = 0;
	struct clk_rate_request parent_req = *req;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (core->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = core->parent;
		if (core->flags & CLK_SET_RATE_PARENT) {
			ret = __clk_determine_rate(parent ? parent->hw : NULL,
						   &parent_req);
			if (ret)
				return ret;

			best = parent_req.rate;
		} else if (parent) {
			best = clk_core_get_rate_nolock(parent);
		} else {
			best = clk_core_get_rate_nolock(core);
		}

		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = core->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_core_get_parent_by_index(core, i);
		if (!parent)
			continue;

		if (core->flags & CLK_SET_RATE_PARENT) {
			parent_req = *req;
			ret = __clk_determine_rate(parent->hw, &parent_req);
			if (ret)
				continue;
		} else {
			parent_req.rate = clk_core_get_rate_nolock(parent);
		}

		if (mux_is_better_rate(req->rate, parent_req.rate,
				       best, flags)) {
			best_parent = parent;
			best = parent_req.rate;
		}
	}

	if (!best_parent)
		return -EINVAL;

out:
	if (best_parent)
		req->best_parent_hw = best_parent->hw;
	req->best_parent_rate = best;
	req->rate = best;

	return 0;
}
EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);

struct clk *__clk_lookup(const char *name)
{
	struct clk_core *core = clk_core_lookup(name);

	return !core ? NULL : core->hw->clk;
}

static void clk_core_get_boundaries(struct clk_core *core,
				    unsigned long *min_rate,
				    unsigned long *max_rate)
{
	struct clk *clk_user;

	lockdep_assert_held(&prepare_lock);

	*min_rate = core->min_rate;
	*max_rate = core->max_rate;

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*min_rate = max(*min_rate, clk_user->min_rate);

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*max_rate = min(*max_rate, clk_user->max_rate);
}

void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
			   unsigned long max_rate)
{
	hw->core->min_rate = min_rate;
	hw->core->max_rate = max_rate;
}
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);

/*
 * Helper for finding the best parent to provide a given frequency. This can
 * be used directly as a determine_rate callback (e.g. for a mux), or from a
 * more complex clock that may combine a mux with other operations.
 */
int __clk_mux_determine_rate(struct clk_hw *hw,
			     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);

int __clk_mux_determine_rate_closest(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
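
/*
 * Illustrative sketch, not part of the original file: a mux provider can plug
 * the helpers above straight into its clk_ops (the foo_ names are made up):
 *
 *	static const struct clk_ops foo_mux_ops = {
 *		.get_parent	= foo_mux_get_parent,
 *		.set_parent	= foo_mux_set_parent,
 *		.determine_rate	= __clk_mux_determine_rate,
 *	};
 *
 * With CLK_SET_RATE_PARENT set in clk_init_data::flags,
 * clk_mux_determine_rate_flags() will also ask each candidate parent to
 * round the requested rate before picking the best one.
 */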

/***        clk api        ***/

static void clk_core_rate_unprotect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->protect_count == 0,
		 "%s already unprotected\n", core->name))
		return;

	if (--core->protect_count > 0)
		return;

	clk_core_rate_unprotect(core->parent);
}

static int clk_core_rate_nuke_protect(struct clk_core *core)
{
	int ret;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return -EINVAL;

	if (core->protect_count == 0)
		return 0;

	ret = core->protect_count;
	core->protect_count = 1;
	clk_core_rate_unprotect(core);

	return ret;
}

/**
 * clk_rate_exclusive_put - release exclusivity over clock rate control
 * @clk: the clk over which the exclusivity is released
 *
 * clk_rate_exclusive_put() completes a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain; as a result, clocks up the parent chain are
 * also placed under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * consumer, the rate effectively gets locked as exclusivity can't be
 * preempted.
 *
 * Calls to clk_rate_exclusive_put() must be balanced with calls to
 * clk_rate_exclusive_get(). Calls to this function may sleep, and do not
 * return error status.
 */
void clk_rate_exclusive_put(struct clk *clk)
{
	if (!clk)
		return;

	clk_prepare_lock();

	/*
	 * If there is something wrong with this consumer's protect count,
	 * stop here before messing with the provider.
	 */
	if (WARN_ON(clk->exclusive_count <= 0))
		goto out;

	clk_core_rate_unprotect(clk->core);
	clk->exclusive_count--;
out:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);

static void clk_core_rate_protect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (core->protect_count == 0)
		clk_core_rate_protect(core->parent);

	core->protect_count++;
}

static void clk_core_rate_restore_protect(struct clk_core *core, int count)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (count == 0)
		return;

	clk_core_rate_protect(core);
	core->protect_count = count;
}

/**
 * clk_rate_exclusive_get - get exclusivity over the clk rate control
 * @clk: the clk over which the exclusivity of rate control is requested
 *
 * clk_rate_exclusive_get() begins a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain; as a result, clocks up the parent chain are
 * also placed under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * consumer, the rate effectively gets locked as exclusivity can't be
 * preempted.
 *
 * Calls to clk_rate_exclusive_get() should be balanced with calls to
 * clk_rate_exclusive_put(). Calls to this function may sleep.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_rate_exclusive_get(struct clk *clk)
{
	if (!clk)
		return 0;

	clk_prepare_lock();
	clk_core_rate_protect(clk->core);
	clk->exclusive_count++;
	clk_prepare_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
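
/*
 * Illustrative sketch, not part of the original file: a consumer that cannot
 * tolerate rate glitches brackets the sensitive section with the pair above
 * (clk is assumed to come from the caller; do_rate_sensitive_work() is a
 * placeholder):
 *
 *	ret = clk_rate_exclusive_get(clk);
 *	if (ret)
 *		return ret;
 *	// other consumers now get -EBUSY from clk_set_rate() on this clk
 *	do_rate_sensitive_work();
 *	clk_rate_exclusive_put(clk);
 */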

static void clk_core_unprepare(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->prepare_count == 0,
		 "%s already unprepared\n", core->name))
		return;

	if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
		 "Unpreparing critical %s\n", core->name))
		return;

	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_unprotect(core);

	if (--core->prepare_count > 0)
		return;

	WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);

	trace_clk_unprepare(core);

	if (core->ops->unprepare)
		core->ops->unprepare(core->hw);

	clk_pm_runtime_put(core);

	trace_clk_unprepare_complete(core);
	clk_core_unprepare(core->parent);
}

static void clk_core_unprepare_lock(struct clk_core *core)
{
	clk_prepare_lock();
	clk_core_unprepare(core);
	clk_prepare_unlock();
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep. One example is a clk which is accessed over
 * I2C. In the complex case a clk gate operation may require a fast and a slow
 * part. It is this reason that clk_unprepare and clk_disable are not mutually
 * exclusive. In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_unprepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

static int clk_core_prepare(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (core->prepare_count == 0) {
		ret = clk_pm_runtime_get(core);
		if (ret)
			return ret;

		ret = clk_core_prepare(core->parent);
		if (ret)
			goto runtime_put;

		trace_clk_prepare(core);

		if (core->ops->prepare)
			ret = core->ops->prepare(core->hw);

		trace_clk_prepare_complete(core);

		if (ret)
			goto unprepare;
	}

	core->prepare_count++;

	/*
	 * CLK_SET_RATE_GATE is a special case of clock protection.
	 * Instead of a consumer claiming exclusive rate control, it is
	 * actually the provider which prevents any consumer from making any
	 * operation which could result in a rate change or rate glitch while
	 * the clock is prepared.
	 */
	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_protect(core);

	return 0;
unprepare:
	clk_core_unprepare(core->parent);
runtime_put:
	clk_pm_runtime_put(core);
	return ret;
}

static int clk_core_prepare_lock(struct clk_core *core)
{
	int ret;

	clk_prepare_lock();
	ret = clk_core_prepare(core);
	clk_prepare_unlock();

	return ret;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep. One example is a clk which is accessed over I2C. In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is this reason that clk_prepare and clk_enable are not mutually
 * exclusive. In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_prepare(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_prepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_prepare);
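
/*
 * Illustrative sketch, not part of the original file: the usual consumer
 * pattern pairs the sleepable and atomic halves, e.g. in a driver probe path
 * (dev is assumed to be a valid struct device *; "bus" is a made-up con_id):
 *
 *	struct clk *clk = devm_clk_get(dev, "bus");
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *	ret = clk_prepare(clk);		// may sleep, e.g. waiting for PLL lock
 *	if (ret)
 *		return ret;
 *	ret = clk_enable(clk);		// must not sleep, usable from IRQ context
 *	if (ret)
 *		clk_unprepare(clk);
 */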

static void clk_core_disable(struct clk_core *core)
{
	lockdep_assert_held(&enable_lock);

	if (!core)
		return;

	if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
		return;

	if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
		 "Disabling critical %s\n", core->name))
		return;

	if (--core->enable_count > 0)
		return;

	trace_clk_disable_rcuidle(core);

	if (core->ops->disable)
		core->ops->disable(core->hw);

	trace_clk_disable_complete_rcuidle(core);

	clk_core_disable(core->parent);
}

static void clk_core_disable_lock(struct clk_core *core)
{
	unsigned long flags;

	flags = clk_enable_lock();
	clk_core_disable(core);
	clk_enable_unlock(flags);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep. One example is a
 * SoC-internal clk which is controlled via simple register writes. In the
 * complex case a clk gate operation may require a fast and a slow part. It is
 * this reason that clk_unprepare and clk_disable are not mutually exclusive.
 * In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_disable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int clk_core_enable(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&enable_lock);

	if (!core)
		return 0;

	if (WARN(core->prepare_count == 0,
		 "Enabling unprepared %s\n", core->name))
		return -ESHUTDOWN;

	if (core->enable_count == 0) {
		ret = clk_core_enable(core->parent);

		if (ret)
			return ret;

		trace_clk_enable_rcuidle(core);

		if (core->ops->enable)
			ret = core->ops->enable(core->hw);

		trace_clk_enable_complete_rcuidle(core);

		if (ret) {
			clk_core_disable(core->parent);
			return ret;
		}
	}

	core->enable_count++;
	return 0;
}

static int clk_core_enable_lock(struct clk_core *core)
{
	unsigned long flags;
	int ret;

	flags = clk_enable_lock();
	ret = clk_core_enable(core);
	clk_enable_unlock(flags);

	return ret;
}

/**
 * clk_gate_restore_context - restore context for power off
 * @hw: the clk_hw pointer of clock whose state is to be restored
 *
 * The clock gate restore context function enables or disables
 * the gate clocks based on the enable_count. This is done in cases
 * where the clock context is lost and, based on the enable_count,
 * the clock either needs to be enabled or disabled. This
 * helps restore the state of gate clocks.
 */
void clk_gate_restore_context(struct clk_hw *hw)
{
	struct clk_core *core = hw->core;

	if (core->enable_count)
		core->ops->enable(hw);
	else
		core->ops->disable(hw);
}
EXPORT_SYMBOL_GPL(clk_gate_restore_context);
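
/*
 * Illustrative sketch, not part of the original file: a simple gate provider
 * can reuse the helper above as its .restore_context hook (the foo_ names are
 * made up):
 *
 *	static const struct clk_ops foo_gate_ops = {
 *		.enable		 = foo_gate_enable,
 *		.disable	 = foo_gate_disable,
 *		.restore_context = clk_gate_restore_context,
 *	};
 *
 * On clk_restore_context() the gate is then re-enabled or re-disabled purely
 * from the cached enable_count, which is what is wanted after hardware state
 * was lost in a deep power-off.
 */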

static int clk_core_save_context(struct clk_core *core)
{
	struct clk_core *child;
	int ret = 0;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = clk_core_save_context(child);
		if (ret < 0)
			return ret;
	}

	if (core->ops && core->ops->save_context)
		ret = core->ops->save_context(core->hw);

	return ret;
}

static void clk_core_restore_context(struct clk_core *core)
{
	struct clk_core *child;

	if (core->ops && core->ops->restore_context)
		core->ops->restore_context(core->hw);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_core_restore_context(child);
}

/**
 * clk_save_context - save clock context for power off
 *
 * Saves the context of the clock registers for power states in which the
 * contents of the registers will be lost. Occurs deep within the suspend
 * code. Returns 0 on success.
 */
int clk_save_context(void)
{
	struct clk_core *clk;
	int ret;

	hlist_for_each_entry(clk, &clk_root_list, child_node) {
		ret = clk_core_save_context(clk);
		if (ret < 0)
			return ret;
	}

	hlist_for_each_entry(clk, &clk_orphan_list, child_node) {
		ret = clk_core_save_context(clk);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(clk_save_context);

/**
 * clk_restore_context - restore clock context after power off
 *
 * Restore the saved clock context upon resume.
 */
void clk_restore_context(void)
{
	struct clk_core *core;

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_core_restore_context(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_core_restore_context(core);
}
EXPORT_SYMBOL_GPL(clk_restore_context);
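
/*
 * Illustrative sketch, not part of the original file: a platform's
 * suspend/resume hooks for a state that powers off the clock controller
 * would bracket the state loss with the two calls above (foo_ names are
 * made up):
 *
 *	static int foo_suspend_noirq(struct device *dev)
 *	{
 *		return clk_save_context();
 *	}
 *
 *	static int foo_resume_noirq(struct device *dev)
 *	{
 *		clk_restore_context();
 *		return 0;
 *	}
 */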

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep. One example is a SoC-internal clk which
 * is controlled via simple register writes. In the complex case a clk ungate
 * operation may require a fast and a slow part. It is this reason that
 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
 * must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_enable(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_enable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_enable);

static int clk_core_prepare_enable(struct clk_core *core)
{
	int ret;

	ret = clk_core_prepare_lock(core);
	if (ret)
		return ret;

	ret = clk_core_enable_lock(core);
	if (ret)
		clk_core_unprepare_lock(core);

	return ret;
}

static void clk_core_disable_unprepare(struct clk_core *core)
{
	clk_core_disable_lock(core);
	clk_core_unprepare_lock(core);
}

static void __init clk_unprepare_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (core->prepare_count)
		return;

	if (core->flags & CLK_IGNORE_UNUSED)
		return;

	if (clk_pm_runtime_get(core))
		return;

	if (clk_core_is_prepared(core)) {
		trace_clk_unprepare(core);
		if (core->ops->unprepare_unused)
			core->ops->unprepare_unused(core->hw);
		else if (core->ops->unprepare)
			core->ops->unprepare(core->hw);
		trace_clk_unprepare_complete(core);
	}

	clk_pm_runtime_put(core);
}

static void __init clk_disable_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;
	unsigned long flags;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_disable_unused_subtree(child);

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(core->parent);

	if (clk_pm_runtime_get(core))
		goto unprepare_out;

	flags = clk_enable_lock();

	if (core->enable_count)
		goto unlock_out;

	if (core->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence: use the disable_unused clk_op if available, otherwise
	 * fall back to the regular disable clk_op.
	 */
	if (clk_core_is_enabled(core)) {
		trace_clk_disable(core);
		if (core->ops->disable_unused)
			core->ops->disable_unused(core->hw);
		else if (core->ops->disable)
			core->ops->disable(core->hw);
		trace_clk_disable_complete(core);
	}

unlock_out:
	clk_enable_unlock(flags);
	clk_pm_runtime_put(core);
unprepare_out:
	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(core->parent);
}

static bool clk_ignore_unused __initdata;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);

static int __init clk_disable_unused(void)
{
	struct clk_core *core;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

	clk_prepare_lock();

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(core);

	clk_prepare_unlock();

	return 0;
}
late_initcall_sync(clk_disable_unused);

static int clk_core_determine_round_nolock(struct clk_core *core,
					   struct clk_rate_request *req)
{
	long rate;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	/*
	 * At this point, core protection will be disabled
	 * - if the provider is not protected at all
	 * - if the calling consumer is the only one which has exclusivity
	 *   over the provider
	 */
	if (clk_core_rate_is_protected(core)) {
		req->rate = core->rate;
	} else if (core->ops->determine_rate) {
		return core->ops->determine_rate(core->hw, req);
	} else if (core->ops->round_rate) {
		rate = core->ops->round_rate(core->hw, req->rate,
					     &req->best_parent_rate);
		if (rate < 0)
			return rate;

		req->rate = rate;
	} else {
		return -EINVAL;
	}

	return 0;
}

static void clk_core_init_rate_req(struct clk_core * const core,
				   struct clk_rate_request *req)
{
	struct clk_core *parent;

	if (WARN_ON(!core || !req))
		return;

	parent = core->parent;
	if (parent) {
		req->best_parent_hw = parent->hw;
		req->best_parent_rate = parent->rate;
	} else {
		req->best_parent_hw = NULL;
		req->best_parent_rate = 0;
	}
}

static bool clk_core_can_round(struct clk_core * const core)
{
	return core->ops->determine_rate || core->ops->round_rate;
}

static int clk_core_round_rate_nolock(struct clk_core *core,
				      struct clk_rate_request *req)
{
	lockdep_assert_held(&prepare_lock);

	if (!core) {
		req->rate = 0;
		return 0;
	}

	clk_core_init_rate_req(core, req);

	if (clk_core_can_round(core))
		return clk_core_determine_round_nolock(core, req);
	else if (core->flags & CLK_SET_RATE_PARENT)
		return clk_core_round_rate_nolock(core->parent, req);

	req->rate = core->rate;
	return 0;
}

/**
 * __clk_determine_rate - get the closest rate actually supported by a clock
 * @hw: determine the rate of this clock
 * @req: target rate request
 *
 * Useful for clk_ops such as .set_rate and .determine_rate.
 */
int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	if (!hw) {
		req->rate = 0;
		return 0;
	}

	return clk_core_round_rate_nolock(hw->core, req);
}
EXPORT_SYMBOL_GPL(__clk_determine_rate);

unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
{
	int ret;
	struct clk_rate_request req;

	clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(hw->core, &req);
	if (ret)
		return 0;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_hw_round_rate);

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned. If clk doesn't support the round_rate
 * operation then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	struct clk_rate_request req;
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(clk->core, &req);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	if (ret)
		return ret;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
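
/*
 * Illustrative sketch, not part of the original file: clk_round_rate() lets a
 * consumer probe what clk_set_rate() would deliver without touching the
 * hardware (rounded_is_close_enough() is a made-up policy check):
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *	if (rounded > 0 && rounded_is_close_enough(rounded))
 *		ret = clk_set_rate(clk, rounded);
 */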

/**
 * __clk_notify - call clk notifier chain
 * @core: clk that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'. Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback. Intended to be called by
 * internal clock code only. Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk_core *core, unsigned long msg,
			unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk->core == core) {
			cnd.clk = cn->clk;
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
						       &cnd);
			if (ret & NOTIFY_STOP_MASK)
				return ret;
		}
	}

	return ret;
}
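
/*
 * Illustrative sketch, not part of the original file: consumers subscribe to
 * these messages with clk_notifier_register() (declared in linux/clk.h) and
 * receive a struct clk_notifier_data. A driver reacting to rate changes
 * might do the following (FOO_MAX_RATE and the foo_ name are made up):
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long msg, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		if (msg == PRE_RATE_CHANGE && cnd->new_rate > FOO_MAX_RATE)
 *			return NOTIFY_BAD;	// veto the rate change
 *		return NOTIFY_OK;
 *	}
 */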

/**
 * __clk_recalc_accuracies - convenience function to recalculate accuracies
 * @core: first clk in the subtree
 *
 * Walks the subtree of clks starting with clk and recalculates accuracies as
 * it goes. Note that if a clk does not implement the .recalc_accuracy
 * callback then it is assumed that the clock will take on the accuracy of its
 * parent.
 */
static void __clk_recalc_accuracies(struct clk_core *core)
{
	unsigned long parent_accuracy = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	if (core->parent)
		parent_accuracy = core->parent->accuracy;

	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
							    parent_accuracy);
	else
		core->accuracy = parent_accuracy;

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_accuracies(child);
}

static long clk_core_get_accuracy_recalc(struct clk_core *core)
{
	if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
		__clk_recalc_accuracies(core);

	return clk_core_get_accuracy_no_lock(core);
}

/**
 * clk_get_accuracy - return the accuracy of clk
 * @clk: the clk whose accuracy is being returned
 *
 * Simply returns the cached accuracy of the clk, unless the
 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalculation will be
 * issued. If clk is NULL then returns 0.
 */
long clk_get_accuracy(struct clk *clk)
{
	long accuracy;

	if (!clk)
		return 0;

	clk_prepare_lock();
	accuracy = clk_core_get_accuracy_recalc(clk->core);
	clk_prepare_unlock();

	return accuracy;
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);

static unsigned long clk_recalc(struct clk_core *core,
				unsigned long parent_rate)
{
	unsigned long rate = parent_rate;

	if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
		rate = core->ops->recalc_rate(core->hw, parent_rate);
		clk_pm_runtime_put(core);
	}
	return rate;
}

/**
 * __clk_recalc_rates
 * @core: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes. Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 */
static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	old_rate = core->rate;

	if (core->parent)
		parent_rate = core->parent->rate;

	core->rate = clk_recalc(core, parent_rate);

	/*
	 * A zero 'msg' allows recalculating the rates without sending any
	 * notification.
	 */
	if (core->notifier_count && msg)
		__clk_notify(core, msg, old_rate, core->rate);

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_rates(child, msg);
}

static unsigned long clk_core_get_rate_recalc(struct clk_core *core)
{
	if (core && (core->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(core, 0);

	return clk_core_get_rate_nolock(core);
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless the CLK_GET_RATE_NOCACHE
 * flag is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long rate;

	if (!clk)
		return 0;

	clk_prepare_lock();
	rate = clk_core_get_rate_recalc(clk->core);
	clk_prepare_unlock();

	return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

static int clk_fetch_parent_index(struct clk_core *core,
				  struct clk_core *parent)
{
	int i;

	if (!parent)
		return -EINVAL;

	for (i = 0; i < core->num_parents; i++) {
		/* Found it first try! */
		if (core->parents[i].core == parent)
			return i;

		/* Something else is here, so keep looking */
		if (core->parents[i].core)
			continue;

		/* Maybe core hasn't been cached but the hw is all we know? */
		if (core->parents[i].hw) {
			if (core->parents[i].hw == parent->hw)
				break;

			/* Didn't match, but we're expecting a clk_hw */
			continue;
		}

		/* Maybe it hasn't been cached (clk_set_parent() path) */
		if (parent == clk_core_get(core, i))
			break;

		/* Fallback to comparing globally unique names */
		if (core->parents[i].name &&
		    !strcmp(parent->name, core->parents[i].name))
			break;
	}

	if (i == core->num_parents)
		return -EINVAL;

	core->parents[i].core = parent;
	return i;
}

/**
 * clk_hw_get_parent_index - return the index of the parent clock
 * @hw: clk_hw associated with the clk being consumed
 *
 * Fetches and returns the index of the parent clock. Returns -EINVAL if the
 * given clock does not have a current parent.
 */
int clk_hw_get_parent_index(struct clk_hw *hw)
{
	struct clk_hw *parent = clk_hw_get_parent(hw);

	if (WARN_ON(parent == NULL))
		return -EINVAL;

	return clk_fetch_parent_index(hw->core, parent->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_index);

/*
 * Update the orphan status of @core and all its children.
 */
static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
{
	struct clk_core *child;

	core->orphan = is_orphan;

	hlist_for_each_entry(child, &core->children, child_node)
		clk_core_update_orphan_status(child, is_orphan);
}

static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
{
	bool was_orphan = core->orphan;

	hlist_del(&core->child_node);

	if (new_parent) {
		bool becomes_orphan = new_parent->orphan;

		/* avoid duplicate POST_RATE_CHANGE notifications */
		if (new_parent->new_child == core)
			new_parent->new_child = NULL;

		hlist_add_head(&core->child_node, &new_parent->children);

		if (was_orphan != becomes_orphan)
			clk_core_update_orphan_status(core, becomes_orphan);
	} else {
		hlist_add_head(&core->child_node, &clk_orphan_list);
		if (!was_orphan)
			clk_core_update_orphan_status(core, true);
	}

	core->parent = new_parent;
}

static struct clk_core *__clk_set_parent_before(struct clk_core *core,
						struct clk_core *parent)
{
	unsigned long flags;
	struct clk_core *old_parent = core->parent;

	/*
	 * 1. enable parents for the CLK_OPS_PARENT_ENABLE clock
	 *
	 * 2. Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * a clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on. This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect to
	 * hardware and software states.
	 *
	 * See also: Comment for clk_set_parent() below.
	 */

	/* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
	if (core->flags & CLK_OPS_PARENT_ENABLE) {
		clk_core_prepare_enable(old_parent);
		clk_core_prepare_enable(parent);
	}

	/* migrate prepare count if > 0 */
	if (core->prepare_count) {
		clk_core_prepare_enable(parent);
		clk_core_enable_lock(core);
	}

	/* update the clk tree topology */
	flags = clk_enable_lock();
	clk_reparent(core, parent);
	clk_enable_unlock(flags);

	return old_parent;
}

static void __clk_set_parent_after(struct clk_core *core,
				   struct clk_core *parent,
				   struct clk_core *old_parent)
{
	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
	if (core->prepare_count) {
		clk_core_disable_lock(core);
		clk_core_disable_unprepare(old_parent);
	}

	/* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
	if (core->flags & CLK_OPS_PARENT_ENABLE) {
		clk_core_disable_unprepare(parent);
		clk_core_disable_unprepare(old_parent);
	}
}

static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
			    u8 p_index)
{
	unsigned long flags;
	int ret = 0;
	struct clk_core *old_parent;

	old_parent = __clk_set_parent_before(core, parent);

	trace_clk_set_parent(core, parent);

	/* change clock input source */
	if (parent && core->ops->set_parent)
		ret = core->ops->set_parent(core->hw, p_index);

	trace_clk_set_parent_complete(core, parent);

	if (ret) {
		flags = clk_enable_lock();
		clk_reparent(core, old_parent);
		clk_enable_unlock(flags);
		__clk_set_parent_after(core, old_parent, parent);

		return ret;
	}

	__clk_set_parent_after(core, parent, old_parent);

	return 0;
}

/**
 * __clk_speculate_rates
 * @core: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications. Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 */
static int __clk_speculate_rates(struct clk_core *core,
				 unsigned long parent_rate)
{
	struct clk_core *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	lockdep_assert_held(&prepare_lock);

	new_rate = clk_recalc(core, parent_rate);

	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
	if (core->notifier_count)
		ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);

	if (ret & NOTIFY_STOP_MASK) {
		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
			 __func__, core->name, ret);
		goto out;
	}

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret & NOTIFY_STOP_MASK)
			break;
	}

out:
	return ret;
}

static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
			     struct clk_core *new_parent, u8 p_index)
{
	struct clk_core *child;

	core->new_rate = new_rate;
	core->new_parent = new_parent;
	core->new_parent_index = p_index;
	/* include clk in new parent's PRE_RATE_CHANGE notifications */
	core->new_child = NULL;
	if (new_parent && new_parent != core->parent)
		new_parent->new_child = core;

	hlist_for_each_entry(child, &core->children, child_node) {
		child->new_rate = clk_recalc(child, new_rate);
		clk_calc_subtree(child, child->new_rate, NULL, 0);
	}
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk_core *clk_calc_new_rates(struct clk_core *core,
					   unsigned long rate)
{
	struct clk_core *top = core;
	struct clk_core *old_parent, *parent;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;
	unsigned long min_rate;
	unsigned long max_rate;
	int p_index = 0;
	long ret;

	/* sanity */
	if (IS_ERR_OR_NULL(core))
		return NULL;

	/* save parent rate, if it exists */
	parent = old_parent = core->parent;
	if (parent)
		best_parent_rate = parent->rate;

	clk_core_get_boundaries(core, &min_rate, &max_rate);

	/* find the closest rate and parent clk/rate */
	if (clk_core_can_round(core)) {
		struct clk_rate_request req;

		req.rate = rate;
		req.min_rate = min_rate;
		req.max_rate = max_rate;

		clk_core_init_rate_req(core, &req);

		ret = clk_core_determine_round_nolock(core, &req);
		if (ret < 0)
			return NULL;

		best_parent_rate = req.best_parent_rate;
		new_rate = req.rate;
		parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;

		if (new_rate < min_rate || new_rate > max_rate)
			return NULL;
	} else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
		/* pass-through clock without adjustable parent */
		core->new_rate = core->rate;
		return NULL;
	} else {
		/* pass-through clock with adjustable parent */
		top = clk_calc_new_rates(parent, rate);
		new_rate = parent->new_rate;
		goto out;
	}

	/* some clocks must be gated to change parent */
	if (parent != old_parent &&
	    (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
		pr_debug("%s: %s not gated but wants to reparent\n",
			 __func__, core->name);
		return NULL;
	}

	/* try finding the new parent index */
	if (parent && core->num_parents > 1) {
		p_index = clk_fetch_parent_index(core, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, core->name);
			return NULL;
		}
	}

	if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
	    best_parent_rate != parent->rate)
		top = clk_calc_new_rates(parent, best_parent_rate);

out:
	clk_calc_subtree(core, new_rate, parent, p_index);

	return top;
}

/*
 * Notify about rate changes in a subtree. Always walk down the whole tree
 * so that, in case of an error, we can walk it again and abort the change.
 */
static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
						  unsigned long event)
{
	struct clk_core *child, *tmp_clk, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (core->rate == core->new_rate)
		return NULL;

	if (core->notifier_count) {
		ret = __clk_notify(core, event, core->rate, core->new_rate);
		if (ret & NOTIFY_STOP_MASK)
			fail_clk = core;
	}

	hlist_for_each_entry(child, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
			continue;
		tmp_clk = clk_propagate_rate_change(child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	/* handle the new child who might not be in core->children yet */
	if (core->new_child) {
		tmp_clk = clk_propagate_rate_change(core->new_child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	return fail_clk;
}

/*
 * walk down a subtree and set the new rates, notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk_core *core)
{
	struct clk_core *child;
	struct hlist_node *tmp;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;
	bool skip_set_rate = false;
	struct clk_core *old_parent;
	struct clk_core *parent = NULL;

	old_rate = core->rate;

	if (core->new_parent) {
		parent = core->new_parent;
		best_parent_rate = core->new_parent->rate;
	} else if (core->parent) {
		parent = core->parent;
		best_parent_rate = core->parent->rate;
	}

	if (clk_pm_runtime_get(core))
		return;

	if (core->flags & CLK_SET_RATE_UNGATE) {
		unsigned long flags;

		clk_core_prepare(core);
		flags = clk_enable_lock();
		clk_core_enable(core);
		clk_enable_unlock(flags);
	}

	if (core->new_parent && core->new_parent != core->parent) {
		old_parent = __clk_set_parent_before(core, core->new_parent);
		trace_clk_set_parent(core, core->new_parent);

		if (core->ops->set_rate_and_parent) {
			skip_set_rate = true;
			core->ops->set_rate_and_parent(core->hw, core->new_rate,
						       best_parent_rate,
						       core->new_parent_index);
		} else if (core->ops->set_parent) {
			core->ops->set_parent(core->hw, core->new_parent_index);
		}

		trace_clk_set_parent_complete(core, core->new_parent);
		__clk_set_parent_after(core, core->new_parent, old_parent);
	}

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(parent);

	trace_clk_set_rate(core, core->new_rate);

	if (!skip_set_rate && core->ops->set_rate)
		core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);

	trace_clk_set_rate_complete(core, core->new_rate);

	core->rate = clk_recalc(core, best_parent_rate);

	if (core->flags & CLK_SET_RATE_UNGATE) {
		unsigned long flags;

		flags = clk_enable_lock();
		clk_core_disable(core);
		clk_enable_unlock(flags);
		clk_core_unprepare(core);
	}

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(parent);

	if (core->notifier_count && old_rate != core->rate)
		__clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);

	if (core->flags & CLK_RECALC_NEW_RATES)
		(void)clk_calc_new_rates(core, core->new_rate);

	/*
	 * Use safe iteration, as change_rate can actually swap parents
	 * for certain clock types.
	 */
	hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
			continue;
		clk_change_rate(child);
	}

	/* handle the new child who might not be in core->children yet */
	if (core->new_child)
		clk_change_rate(core->new_child);

	clk_pm_runtime_put(core);
}

static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
						    unsigned long req_rate)
{
	int ret, cnt;
	struct clk_rate_request req;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	/* simulate what the rate would be if it could be freely set */
	cnt = clk_core_rate_nuke_protect(core);
	if (cnt < 0)
		return cnt;

	clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
	req.rate = req_rate;

	ret = clk_core_round_rate_nolock(core, &req);

	/* restore the protection */
	clk_core_rate_restore_protect(core, cnt);

	return ret ? 0 : req.rate;
}

static int clk_core_set_rate_nolock(struct clk_core *core,
				    unsigned long req_rate)
{
	struct clk_core *top, *fail_clk;
	unsigned long rate;
	int ret = 0;

	if (!core)
		return 0;

	rate = clk_core_req_round_rate_nolock(core, req_rate);

	/* bail early if nothing to do */
	if (rate == clk_core_get_rate_nolock(core))
		return 0;

	/* fail on a direct rate set of a protected provider */
	if (clk_core_rate_is_protected(core))
		return -EBUSY;

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(core, req_rate);
	if (!top)
		return -EINVAL;

	ret = clk_pm_runtime_get(core);
	if (ret)
		return ret;

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_debug("%s: failed to set %s rate\n", __func__,
			 fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		ret = -EBUSY;
		goto err;
	}

	/* change the rates */
	clk_change_rate(top);

	core->req_rate = req_rate;
err:
	clk_pm_runtime_put(core);

	return ret;
}

/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored. If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate. Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	ret = clk_core_set_rate_nolock(clk->core, rate);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
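
/*
 * Illustrative sketch, not part of the original file: a typical consumer call
 * site simply checks the return value; with CLK_SET_RATE_PARENT set on the
 * clk, the request may be satisfied by retuning upstream clocks:
 *
 *	ret = clk_set_rate(clk, 104000000);
 *	if (ret)
 *		dev_err(dev, "failed to set bus rate: %d\n", ret);
 */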

/**
 * clk_set_rate_exclusive - specify a new rate and get exclusive control
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * This is a combination of clk_set_rate() and clk_rate_exclusive_get()
 * within a critical section.
 *
 * This can be used initially to ensure that at least one consumer is
 * satisfied when several consumers are competing for exclusivity over the
 * same clock provider.
 *
 * The exclusivity is not applied if setting the rate failed.
 *
 * Calls to clk_rate_exclusive_get() should be balanced with calls to
 * clk_rate_exclusive_put().
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
{
	int ret;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	/*
	 * The temporary protection removal is not here, on purpose.
	 * This function is meant to be used instead of clk_rate_protect,
	 * so before the consumer code path protects the clock provider.
	 */
	ret = clk_core_set_rate_nolock(clk->core, rate);
	if (!ret) {
		clk_core_rate_protect(clk->core);
		clk->exclusive_count++;
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);

/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
{
	int ret = 0;
	unsigned long old_min, old_max, rate;

	if (!clk)
		return 0;

	if (min > max) {
		pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
		       __func__, clk->core->name, clk->dev_id, clk->con_id,
		       min, max);
		return -EINVAL;
	}

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	/* Save the current values in case we need to rollback the change */
	old_min = clk->min_rate;
	old_max = clk->max_rate;
	clk->min_rate = min;
	clk->max_rate = max;

	rate = clk_core_get_rate_nolock(clk->core);
	if (rate < min || rate > max) {
		/*
		 * The current rate is outside the requested range, so try to
		 * request the appropriate range boundary. Note that this may
		 * fail for the usual reasons (clock broken, clock protected,
		 * etc) but also because:
		 * - round_rate() was not favorable and fell on the wrong
		 *   side of the boundary
		 * - the determine_rate() callback does not really check for
		 *   this corner case when determining the rate
		 */
		if (rate < min)
			rate = min;
		else
			rate = max;

		ret = clk_core_set_rate_nolock(clk->core, rate);
		if (ret) {
			/* rollback the changes */
			clk->min_rate = old_min;
			clk->max_rate = old_max;
		}
	}

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_range);
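
/*
 * Illustrative sketch, not part of the original file: per-consumer rate
 * constraints compose across all users of the same hardware clock, each via
 * its own struct clk handle (thermal_clk/perf_clk are made-up names):
 *
 *	ret = clk_set_max_rate(thermal_clk, 800000000);	// thermal cap
 *	ret = clk_set_rate_range(perf_clk, 200000000, 600000000);
 *
 * The framework clamps the effective range to the intersection of all
 * requests (see clk_core_get_boundaries() above).
 */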

/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	if (!clk)
		return 0;

	return clk_set_rate_range(clk, rate, clk->max_rate);
}
EXPORT_SYMBOL_GPL(clk_set_min_rate);

/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	if (!clk)
		return 0;

	return clk_set_rate_range(clk, clk->min_rate, rate);
}
EXPORT_SYMBOL_GPL(clk_set_max_rate);

/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent. Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
	struct clk *parent;

	if (!clk)
		return NULL;

	clk_prepare_lock();
	parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
	clk_prepare_unlock();

	return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

static struct clk_core *__clk_init_parent(struct clk_core *core)
{
	u8 index = 0;

	if (core->num_parents > 1 && core->ops->get_parent)
		index = core->ops->get_parent(core->hw);

	return clk_core_get_parent_by_index(core, index);
}

static void clk_core_reparent(struct clk_core *core,
			      struct clk_core *new_parent)
{
	clk_reparent(core, new_parent);
	__clk_recalc_accuracies(core);
	__clk_recalc_rates(core, POST_RATE_CHANGE);
}

void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
{
	if (!hw)
		return;

	clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
}

/**
 * clk_has_parent - check if a clock is a possible parent for another
 * @clk: clock source
 * @parent: parent clock source
 *
 * This function can be used in drivers that need to check that a clock can be
 * the parent of another without actually changing the parent.
 *
 * Returns true if @parent is a possible parent for @clk, false otherwise.
 */
bool clk_has_parent(struct clk *clk, struct clk *parent)
{
	struct clk_core *core, *parent_core;
	int i;

	/* NULL clocks should be nops, so return success if either is NULL. */
	if (!clk || !parent)
		return true;

	core = clk->core;
	parent_core = parent->core;

	/* Optimize for the case where the parent is already the parent. */
	if (core->parent == parent_core)
		return true;

	for (i = 0; i < core->num_parents; i++)
		if (!strcmp(core->parents[i].name, parent_core->name))
			return true;

	return false;
}
EXPORT_SYMBOL_GPL(clk_has_parent);

static int clk_core_set_parent_nolock(struct clk_core *core,
				      struct clk_core *parent)
{
	int ret = 0;
	int p_index = 0;
	unsigned long p_rate = 0;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (core->parent == parent)
		return 0;

	/* verify ops for multi-parent clks */
	if (core->num_parents > 1 && !core->ops->set_parent)
		return -EPERM;

	/* check that we are allowed to re-parent if the clock is in use */
	if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count)
		return -EBUSY;

	if (clk_core_rate_is_protected(core))
		return -EBUSY;

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(core, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, core->name);
			return p_index;
		}
		p_rate = parent->rate;
	}

	ret = clk_pm_runtime_get(core);
	if (ret)
		return ret;

	/* propagate PRE_RATE_CHANGE notifications */
	ret = __clk_speculate_rates(core, p_rate);

	/* abort if a driver objects */
	if (ret & NOTIFY_STOP_MASK)
		goto runtime_put;

	/* do the re-parent */
	ret = __clk_set_parent(core, parent, p_index);

	/* propagate rate and accuracy recalculation accordingly */
	if (ret) {
		__clk_recalc_rates(core, ABORT_RATE_CHANGE);
	} else {
		__clk_recalc_rates(core, POST_RATE_CHANGE);
		__clk_recalc_accuracies(core);
	}

runtime_put:
	clk_pm_runtime_put(core);

	return ret;
}

int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *parent)
{
	return clk_core_set_parent_nolock(hw->core, parent->core);
}
EXPORT_SYMBOL_GPL(clk_hw_set_parent);

/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source. If clk is in
 * prepared state, the clk will get enabled for the duration of this call. If
 * that's not acceptable for a specific clk (e.g. the consumer can't handle
 * that, or the reparenting is glitchy in hardware), use the
 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
 *
 * After successfully changing clk's parent, clk_set_parent will update the
 * clk topology and propagate rate recalculation via __clk_recalc_rates.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	ret = clk_core_set_parent_nolock(clk->core,
					 parent ? parent->core : NULL);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
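
/*
 * Illustrative sketch, not part of the original file: switching a mux between
 * a fast PLL and a slow crystal from consumer code (the con_ids "mux" and
 * "xtal" are made up; dev is assumed valid):
 *
 *	struct clk *mux = devm_clk_get(dev, "mux");
 *	struct clk *xtal = devm_clk_get(dev, "xtal");
 *
 *	if (clk_has_parent(mux, xtal))
 *		ret = clk_set_parent(mux, xtal);
 */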

static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
{
	int ret = -EINVAL;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (clk_core_rate_is_protected(core))
		return -EBUSY;

	trace_clk_set_phase(core, degrees);

	if (core->ops->set_phase) {
		ret = core->ops->set_phase(core->hw, degrees);
		if (!ret)
			core->phase = degrees;
	}

	trace_clk_set_phase_complete(core, degrees);

	return ret;
}

/**
 * clk_set_phase - adjust the phase shift of a clock signal
 * @clk: clock signal source
 * @degrees: number of degrees the signal is shifted
 *
 * Shifts the phase of a clock signal by the specified
 * degrees. Returns 0 on success, -EERROR otherwise.
 *
 * This function makes no distinction about the input or reference
 * signal that we adjust the clock signal phase against. For example
 * phase locked-loop clock signal generators we may shift phase with
 * respect to feedback clock signal input, but for other cases the
 * clock phase may be shifted with respect to some other, unspecified
 * signal.
 *
 * Additionally the concept of phase shift does not propagate through
 * the clock tree hierarchy, which sets it apart from clock rates and
 * clock accuracy. A parent clock phase attribute does not have an
 * impact on the phase attribute of a child clock.
 */
int clk_set_phase(struct clk *clk, int degrees)
{
	int ret;

	if (!clk)
		return 0;

	/* sanity check degrees */
	degrees %= 360;
	if (degrees < 0)
		degrees += 360;

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	ret = clk_core_set_phase_nolock(clk->core, degrees);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_phase);

static int clk_core_get_phase(struct clk_core *core)
{
	int ret;

	lockdep_assert_held(&prepare_lock);
	if (!core->ops->get_phase)
		return 0;

	/* Always try to update cached phase if possible */
	ret = core->ops->get_phase(core->hw);
	if (ret >= 0)
		core->phase = ret;

	return ret;
}

/**
 * clk_get_phase - return the phase shift of a clock signal
 * @clk: clock signal source
 *
 * Returns the phase shift of a clock node in degrees, otherwise returns
 * -EERROR.
 */
int clk_get_phase(struct clk *clk)
{
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();
	ret = clk_core_get_phase(clk->core);
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_get_phase);

static void clk_core_reset_duty_cycle_nolock(struct clk_core *core)
{
	/* Assume a default value of 50% */
	core->duty.num = 1;
	core->duty.den = 2;
}

static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core);

static int clk_core_update_duty_cycle_nolock(struct clk_core *core)
{
	struct clk_duty *duty = &core->duty;
	int ret = 0;

	if (!core->ops->get_duty_cycle)
		return clk_core_update_duty_cycle_parent_nolock(core);

	ret = core->ops->get_duty_cycle(core->hw, duty);
	if (ret)
		goto reset;

	/* Don't trust the clock provider too much */
	if (duty->den == 0 || duty->num > duty->den) {
		ret = -EINVAL;
		goto reset;
	}

	return 0;

reset:
	clk_core_reset_duty_cycle_nolock(core);
	return ret;
}

static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core)
{
	int ret = 0;

	if (core->parent &&
	    core->flags & CLK_DUTY_CYCLE_PARENT) {
		ret = clk_core_update_duty_cycle_nolock(core->parent);
		memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
	} else {
		clk_core_reset_duty_cycle_nolock(core);
	}

	return ret;
}

static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
						 struct clk_duty *duty);

static int clk_core_set_duty_cycle_nolock(struct clk_core *core,
					  struct clk_duty *duty)
{
	int ret;

	lockdep_assert_held(&prepare_lock);

	if (clk_core_rate_is_protected(core))
		return -EBUSY;

	trace_clk_set_duty_cycle(core, duty);

	if (!core->ops->set_duty_cycle)
		return clk_core_set_duty_cycle_parent_nolock(core, duty);

	ret = core->ops->set_duty_cycle(core->hw, duty);
	if (!ret)
		memcpy(&core->duty, duty, sizeof(*duty));

	trace_clk_set_duty_cycle_complete(core, duty);

	return ret;
}

static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
						 struct clk_duty *duty)
{
	int ret = 0;

	if (core->parent &&
	    core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) {
		ret = clk_core_set_duty_cycle_nolock(core->parent, duty);
		memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
	}

	return ret;
}

/**
 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @num: numerator of the duty cycle ratio to be applied
 * @den: denominator of the duty cycle ratio to be applied
 *
 * Apply the duty cycle ratio if the ratio is valid and the clock can
 * perform this operation.
 *
 * Returns (0) on success, a negative errno otherwise.
 */
int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den)
{
	int ret;
	struct clk_duty duty;

	if (!clk)
		return 0;

	/* sanity check the ratio */
	if (den == 0 || num > den)
		return -EINVAL;

	duty.num = num;
	duty.den = den;

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	ret = clk_core_set_duty_cycle_nolock(clk->core, &duty);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_duty_cycle);
2827
2828static int clk_core_get_scaled_duty_cycle(struct clk_core *core,
2829 unsigned int scale)
2830{
2831 struct clk_duty *duty = &core->duty;
2832 int ret;
2833
2834 clk_prepare_lock();
2835
2836 ret = clk_core_update_duty_cycle_nolock(core);
2837 if (!ret)
2838 ret = mult_frac(scale, duty->num, duty->den);
2839
2840 clk_prepare_unlock();
2841
2842 return ret;
2843}
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale)
2854{
2855 if (!clk)
2856 return 0;
2857
2858 return clk_core_get_scaled_duty_cycle(clk->core, scale);
2859}
2860EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle);

/**
 * clk_is_match - check if two clk's point to the same hardware clock
 * @p: clk compared against q
 * @q: clk compared against p
 *
 * Returns true if the two struct clk pointers both point to the same hardware
 * clock node. Put differently, returns true if struct clk *p and struct clk *q
 * share the same struct clk_core object.
 *
 * Returns false otherwise. Note that two NULL clks are treated as matching.
 */
bool clk_is_match(const struct clk *p, const struct clk *q)
{
	/* trivial case: identical struct clk's or both NULL */
	if (p == q)
		return true;

	/* true if clk->core pointers match. Avoid dereferencing garbage */
	if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
		if (p->core == q->core)
			return true;

	return false;
}
EXPORT_SYMBOL_GPL(clk_is_match);
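
/*
 * Illustrative sketch: clk_is_match() is useful when two struct clk
 * handles were obtained independently (e.g. by two clk_get() calls)
 * but may refer to the same hardware clock. The device and connection
 * ids below are hypothetical.
 *
 *	struct clk *a = clk_get(dev, "core");
 *	struct clk *b = clk_get(dev, "bus");
 *
 *	if (clk_is_match(a, b))
 *		dev_dbg(dev, "core and bus share one hardware clock\n");
 */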

/***        debugfs support        ***/

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *rootdir;
static int inited = 0;
static DEFINE_MUTEX(clk_debug_lock);
static HLIST_HEAD(clk_debug_list);

static struct hlist_head *orphan_list[] = {
	&clk_orphan_list,
	NULL,
};

static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
				 int level)
{
	int phase;

	seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ",
		   level * 3 + 1, "",
		   30 - level * 3, c->name,
		   c->enable_count, c->prepare_count, c->protect_count,
		   clk_core_get_rate_recalc(c),
		   clk_core_get_accuracy_recalc(c));

	phase = clk_core_get_phase(c);
	if (phase >= 0)
		seq_printf(s, "%5d", phase);
	else
		seq_puts(s, "-----");

	seq_printf(s, " %6d\n", clk_core_get_scaled_duty_cycle(c, 100000));
}

static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
				     int level)
{
	struct clk_core *child;

	clk_summary_show_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node)
		clk_summary_show_subtree(s, child, level + 1);
}

static int clk_summary_show(struct seq_file *s, void *data)
{
	struct clk_core *c;
	struct hlist_head **lists = (struct hlist_head **)s->private;

	seq_puts(s, "                                 enable  prepare  protect                                duty\n");
	seq_puts(s, "   clock                          count    count    count        rate   accuracy phase  cycle\n");
	seq_puts(s, "---------------------------------------------------------------------------------------------\n");

	clk_prepare_lock();

	for (; *lists; lists++)
		hlist_for_each_entry(c, *lists, child_node)
			clk_summary_show_subtree(s, c, 0);

	clk_prepare_unlock();

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(clk_summary);

static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
{
	int phase;
	unsigned long min_rate, max_rate;

	clk_core_get_boundaries(c, &min_rate, &max_rate);

	/* This should be JSON format, i.e. elements separated with a comma */
	seq_printf(s, "\"%s\": { ", c->name);
	seq_printf(s, "\"enable_count\": %d,", c->enable_count);
	seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
	seq_printf(s, "\"protect_count\": %d,", c->protect_count);
	seq_printf(s, "\"rate\": %lu,", clk_core_get_rate_recalc(c));
	seq_printf(s, "\"min_rate\": %lu,", min_rate);
	seq_printf(s, "\"max_rate\": %lu,", max_rate);
	seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy_recalc(c));
	phase = clk_core_get_phase(c);
	if (phase >= 0)
		seq_printf(s, "\"phase\": %d,", phase);
	seq_printf(s, "\"duty_cycle\": %u",
		   clk_core_get_scaled_duty_cycle(c, 100000));
}

static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
{
	struct clk_core *child;

	clk_dump_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node) {
		seq_putc(s, ',');
		clk_dump_subtree(s, child, level + 1);
	}

	seq_putc(s, '}');
}

static int clk_dump_show(struct seq_file *s, void *data)
{
	struct clk_core *c;
	bool first_node = true;
	struct hlist_head **lists = (struct hlist_head **)s->private;

	seq_putc(s, '{');
	clk_prepare_lock();

	for (; *lists; lists++) {
		hlist_for_each_entry(c, *lists, child_node) {
			if (!first_node)
				seq_putc(s, ',');
			first_node = false;
			clk_dump_subtree(s, c, 0);
		}
	}

	clk_prepare_unlock();

	seq_puts(s, "}\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(clk_dump);
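
/*
 * Both attributes are registered read-only under the "clk" debugfs
 * directory by clk_debug_init() below. Assuming debugfs is mounted in
 * the usual place, the tree can be inspected from userspace:
 *
 *	# cat /sys/kernel/debug/clk/clk_summary
 *	# cat /sys/kernel/debug/clk/clk_dump
 */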

#undef CLOCK_ALLOW_WRITE_DEBUGFS
#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
/*
 * This can be dangerous, therefore don't provide any real compile time
 * configuration option for this feature.
 * People who want to use this will need to modify the source code directly.
 */
static int clk_rate_set(void *data, u64 val)
{
	struct clk_core *core = data;
	int ret;

	clk_prepare_lock();
	ret = clk_core_set_rate_nolock(core, val);
	clk_prepare_unlock();

	return ret;
}

#define clk_rate_mode	0644
#else
#define clk_rate_set	NULL
#define clk_rate_mode	0444
#endif

static int clk_rate_get(void *data, u64 *val)
{
	struct clk_core *core = data;

	*val = core->rate;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(clk_rate_fops, clk_rate_get, clk_rate_set, "%llu\n");

static const struct {
	unsigned long flag;
	const char *name;
} clk_flags[] = {
#define ENTRY(f) { f, #f }
	ENTRY(CLK_SET_RATE_GATE),
	ENTRY(CLK_SET_PARENT_GATE),
	ENTRY(CLK_SET_RATE_PARENT),
	ENTRY(CLK_IGNORE_UNUSED),
	ENTRY(CLK_GET_RATE_NOCACHE),
	ENTRY(CLK_SET_RATE_NO_REPARENT),
	ENTRY(CLK_GET_ACCURACY_NOCACHE),
	ENTRY(CLK_RECALC_NEW_RATES),
	ENTRY(CLK_SET_RATE_UNGATE),
	ENTRY(CLK_IS_CRITICAL),
	ENTRY(CLK_OPS_PARENT_ENABLE),
	ENTRY(CLK_DUTY_CYCLE_PARENT),
#undef ENTRY
};

static int clk_flags_show(struct seq_file *s, void *data)
{
	struct clk_core *core = s->private;
	unsigned long flags = core->flags;
	unsigned int i;

	for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) {
		if (flags & clk_flags[i].flag) {
			seq_printf(s, "%s\n", clk_flags[i].name);
			flags &= ~clk_flags[i].flag;
		}
	}
	if (flags) {
		/* Unknown flags */
		seq_printf(s, "0x%lx\n", flags);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(clk_flags);

static void possible_parent_show(struct seq_file *s, struct clk_core *core,
				 unsigned int i, char terminator)
{
	struct clk_core *parent;

	/*
	 * Go through the following options to fetch a parent's name.
	 *
	 * 1. Fetch the registered parent clock and use its name
	 * 2. Use the global (fallback) name if specified
	 * 3. Use the local fw_name if provided
	 * 4. Fetch parent clock's clock-output-name if DT index was set
	 *
	 * This may still fail in some cases, such as when the DT index is
	 * pointing to a generic DT property. In that case the parent is
	 * reported as "(missing)".
	 */
	parent = clk_core_get_parent_by_index(core, i);
	if (parent)
		seq_puts(s, parent->name);
	else if (core->parents[i].name)
		seq_puts(s, core->parents[i].name);
	else if (core->parents[i].fw_name)
		seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
	else if (core->parents[i].index >= 0)
		seq_puts(s,
			 of_clk_get_parent_name(core->of_node,
						core->parents[i].index));
	else
		seq_puts(s, "(missing)");

	seq_putc(s, terminator);
}

static int possible_parents_show(struct seq_file *s, void *data)
{
	struct clk_core *core = s->private;
	int i;

	for (i = 0; i < core->num_parents - 1; i++)
		possible_parent_show(s, core, i, ' ');

	possible_parent_show(s, core, i, '\n');

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(possible_parents);

static int current_parent_show(struct seq_file *s, void *data)
{
	struct clk_core *core = s->private;

	if (core->parent)
		seq_printf(s, "%s\n", core->parent->name);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(current_parent);

static int clk_duty_cycle_show(struct seq_file *s, void *data)
{
	struct clk_core *core = s->private;
	struct clk_duty *duty = &core->duty;

	seq_printf(s, "%u/%u\n", duty->num, duty->den);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle);

static int clk_min_rate_show(struct seq_file *s, void *data)
{
	struct clk_core *core = s->private;
	unsigned long min_rate, max_rate;

	clk_prepare_lock();
	clk_core_get_boundaries(core, &min_rate, &max_rate);
	clk_prepare_unlock();
	seq_printf(s, "%lu\n", min_rate);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(clk_min_rate);

static int clk_max_rate_show(struct seq_file *s, void *data)
{
	struct clk_core *core = s->private;
	unsigned long min_rate, max_rate;

	clk_prepare_lock();
	clk_core_get_boundaries(core, &min_rate, &max_rate);
	clk_prepare_unlock();
	seq_printf(s, "%lu\n", max_rate);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(clk_max_rate);

static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
{
	struct dentry *root;

	if (!core || !pdentry)
		return;

	root = debugfs_create_dir(core->name, pdentry);
	core->dentry = root;

	debugfs_create_file("clk_rate", clk_rate_mode, root, core,
			    &clk_rate_fops);
	debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops);
	debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops);
	debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
	debugfs_create_u32("clk_phase", 0444, root, &core->phase);
	debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops);
	debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count);
	debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
	debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
	debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
	debugfs_create_file("clk_duty_cycle", 0444, root, core,
			    &clk_duty_cycle_fops);

	if (core->num_parents > 0)
		debugfs_create_file("clk_parent", 0444, root, core,
				    &current_parent_fops);

	if (core->num_parents > 1)
		debugfs_create_file("clk_possible_parents", 0444, root, core,
				    &possible_parents_fops);

	if (core->ops->debug_init)
		core->ops->debug_init(core->hw, core->dentry);
}

/**
 * clk_debug_register - add a clk node to the debugfs clk directory
 * @core: the clk being added to the debugfs clk directory
 *
 * Dynamically adds a clk to the debugfs clk directory if debugfs has been
 * initialized. Otherwise it defers creation; the pending entries are
 * created later by clk_debug_init().
 */
static void clk_debug_register(struct clk_core *core)
{
	mutex_lock(&clk_debug_lock);
	hlist_add_head(&core->debug_node, &clk_debug_list);
	if (inited)
		clk_debug_create_one(core, rootdir);
	mutex_unlock(&clk_debug_lock);
}

/**
 * clk_debug_unregister - remove a clk node from the debugfs clk directory
 * @core: the clk being removed from the debugfs clk directory
 *
 * Dynamically removes a clk and all of its child nodes from the
 * debugfs clk directory if clk->dentry points to debugfs created by
 * clk_debug_register in __clk_core_init.
 */
static void clk_debug_unregister(struct clk_core *core)
{
	mutex_lock(&clk_debug_lock);
	hlist_del_init(&core->debug_node);
	debugfs_remove_recursive(core->dentry);
	core->dentry = NULL;
	mutex_unlock(&clk_debug_lock);
}

/**
 * clk_debug_init - lazily populate the debugfs clk directory
 *
 * clks are often initialized very early during boot before memory can be
 * dynamically allocated and well before debugfs is setup. This function
 * populates the debugfs clk directory once at boot-time when we know that
 * debugfs is setup. It should only be called once at boot-time; all other
 * clks added dynamically will be done so with clk_debug_register.
 */
static int __init clk_debug_init(void)
{
	struct clk_core *core;

	rootdir = debugfs_create_dir("clk", NULL);

	debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
			    &clk_summary_fops);
	debugfs_create_file("clk_dump", 0444, rootdir, &all_lists,
			    &clk_dump_fops);
	debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list,
			    &clk_summary_fops);
	debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list,
			    &clk_dump_fops);

	mutex_lock(&clk_debug_lock);
	hlist_for_each_entry(core, &clk_debug_list, debug_node)
		clk_debug_create_one(core, rootdir);

	inited = 1;
	mutex_unlock(&clk_debug_lock);

	return 0;
}
late_initcall(clk_debug_init);
#else
static inline void clk_debug_register(struct clk_core *core) { }
static inline void clk_debug_reparent(struct clk_core *core,
				      struct clk_core *new_parent)
{
}
static inline void clk_debug_unregister(struct clk_core *core)
{
}
#endif

static void clk_core_reparent_orphans_nolock(void)
{
	struct clk_core *orphan;
	struct hlist_node *tmp2;

	/*
	 * walk the list of orphan clocks and reparent any that newly finds a
	 * parent.
	 */
	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
		struct clk_core *parent = __clk_init_parent(orphan);

		/*
		 * We need to use __clk_set_parent_before() and _after() to
		 * properly migrate any prepare/enable count of the orphan
		 * clock. This is important for CLK_IS_CRITICAL clocks, which
		 * are enabled during init but might not have a parent yet.
		 */
		if (parent) {
			/* update the clk tree topology */
			__clk_set_parent_before(orphan, parent);
			__clk_set_parent_after(orphan, parent, NULL);
			__clk_recalc_accuracies(orphan);
			__clk_recalc_rates(orphan, 0);
		}
	}
}

/**
 * __clk_core_init - initialize the data structures in a struct clk_core
 * @core:	clk_core being initialized
 *
 * Initializes the lists in struct clk_core, queries the hardware for the
 * parent and rate and sets them both.
 */
static int __clk_core_init(struct clk_core *core)
{
	int ret;
	struct clk_core *parent;
	unsigned long rate;
	int phase;

	if (!core)
		return -EINVAL;

	clk_prepare_lock();

	ret = clk_pm_runtime_get(core);
	if (ret)
		goto unlock;

	/* check to see if a clock with this name is already registered */
	if (clk_core_lookup(core->name)) {
		pr_debug("%s: clk %s already initialized\n",
			 __func__, core->name);
		ret = -EEXIST;
		goto out;
	}

	/* check that clk_ops are sane.  See Documentation/driver-api/clk.rst */
	if (core->ops->set_rate &&
	    !((core->ops->round_rate || core->ops->determine_rate) &&
	      core->ops->recalc_rate)) {
		pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
		       __func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	if (core->ops->set_parent && !core->ops->get_parent) {
		pr_err("%s: %s must implement .get_parent & .set_parent\n",
		       __func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	if (core->num_parents > 1 && !core->ops->get_parent) {
		pr_err("%s: %s must implement .get_parent as it has multi parents\n",
		       __func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	if (core->ops->set_rate_and_parent &&
	    !(core->ops->set_parent && core->ops->set_rate)) {
		pr_err("%s: %s must implement .set_parent & .set_rate\n",
		       __func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	/*
	 * optional platform-specific magic
	 *
	 * The .init callback is not used by any of the basic clock types, but
	 * exists for weird hardware that must perform initialization magic so
	 * that CCF gets an accurate view of the clock for any other callbacks.
	 * It may also be used when dynamic resource allocation is required;
	 * such allocations must be freed in the .terminate() callback.
	 * This callback must not be used to initialize parameter state, such
	 * as rate or parent.
	 *
	 * If it exists, this callback is called before any other callback of
	 * the clock.
	 */
	if (core->ops->init) {
		ret = core->ops->init(core->hw);
		if (ret)
			goto out;
	}

	parent = core->parent = __clk_init_parent(core);

	/*
	 * Populate core->parent if parent has already been clk_core_init'd. If
	 * parent has not yet been clk_core_init'd then place clk in the orphan
	 * list.  If clk doesn't have any parents then place it in the root
	 * clk list.
	 *
	 * Every time a new clk is clk_init'd then we walk the list of orphan
	 * clocks and re-parent any that are children of the clock currently
	 * being clk_init'd.
	 */
	if (parent) {
		hlist_add_head(&core->child_node, &parent->children);
		core->orphan = parent->orphan;
	} else if (!core->num_parents) {
		hlist_add_head(&core->child_node, &clk_root_list);
		core->orphan = false;
	} else {
		hlist_add_head(&core->child_node, &clk_orphan_list);
		core->orphan = true;
	}

	/*
	 * Set clk's accuracy.  The preferred method is to use
	 * .recalc_accuracy. For simple clocks and lazy developers the default
	 * fallback is to use the parent's accuracy.  If a clock doesn't have a
	 * parent (or is orphaned) then accuracy is set to zero (perfect
	 * clock).
	 */
	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
					clk_core_get_accuracy_no_lock(parent));
	else if (parent)
		core->accuracy = parent->accuracy;
	else
		core->accuracy = 0;

	/*
	 * Set clk's phase by clk_core_get_phase() caching the phase.
	 * Since a phase is by definition relative to its parent, just
	 * query the current clock phase, or just assume it's in phase.
	 */
	phase = clk_core_get_phase(core);
	if (phase < 0) {
		ret = phase;
		pr_warn("%s: Failed to get phase for clk '%s'\n", __func__,
			core->name);
		goto out;
	}

	/*
	 * Set clk's duty cycle.
	 */
	clk_core_update_duty_cycle_nolock(core);

	/*
	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
	 * simple clocks and lazy developers the default fallback is to use the
	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
	 * then rate is set to zero.
	 */
	if (core->ops->recalc_rate)
		rate = core->ops->recalc_rate(core->hw,
				clk_core_get_rate_nolock(parent));
	else if (parent)
		rate = parent->rate;
	else
		rate = 0;
	core->rate = core->req_rate = rate;

	/*
	 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
	 * don't get accidentally disabled when walking the orphan tree and
	 * reparenting clocks
	 */
	if (core->flags & CLK_IS_CRITICAL) {
		unsigned long flags;

		ret = clk_core_prepare(core);
		if (ret) {
			pr_warn("%s: critical clk '%s' failed to prepare\n",
				__func__, core->name);
			goto out;
		}

		flags = clk_enable_lock();
		ret = clk_core_enable(core);
		clk_enable_unlock(flags);
		if (ret) {
			pr_warn("%s: critical clk '%s' failed to enable\n",
				__func__, core->name);
			clk_core_unprepare(core);
			goto out;
		}
	}

	clk_core_reparent_orphans_nolock();

	kref_init(&core->ref);
out:
	clk_pm_runtime_put(core);
unlock:
	if (ret)
		hlist_del_init(&core->child_node);

	clk_prepare_unlock();

	if (!ret)
		clk_debug_register(core);

	return ret;
}

/**
 * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core
 * @core: clk to add consumer to
 * @clk: consumer to link to a clk
 */
static void clk_core_link_consumer(struct clk_core *core, struct clk *clk)
{
	clk_prepare_lock();
	hlist_add_head(&clk->clks_node, &core->clks);
	clk_prepare_unlock();
}

/**
 * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core
 * @clk: consumer to unlink
 */
static void clk_core_unlink_consumer(struct clk *clk)
{
	lockdep_assert_held(&prepare_lock);
	hlist_del(&clk->clks_node);
}

/**
 * alloc_clk - Allocate a clk consumer, but leave it unlinked to the clk_core
 * @core: clk to allocate a consumer for
 * @dev_id: string describing device name
 * @con_id: connection ID string on device
 *
 * Returns: clk consumer left unlinked from the consumer list
 */
static struct clk *alloc_clk(struct clk_core *core, const char *dev_id,
			     const char *con_id)
{
	struct clk *clk;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	clk->core = core;
	clk->dev_id = dev_id;
	clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
	clk->max_rate = ULONG_MAX;

	return clk;
}

/**
 * free_clk - Free a clk consumer
 * @clk: clk consumer to free
 *
 * Note, this assumes the clk has been unlinked from the clk_core consumer
 * list.
 */
static void free_clk(struct clk *clk)
{
	kfree_const(clk->con_id);
	kfree(clk);
}

/**
 * clk_hw_create_clk: Allocate and link a clk consumer to a clk_core given
 * a clk_hw
 * @dev: clk consumer device
 * @hw: clk_hw associated with the clk being consumed
 * @dev_id: string describing device name
 * @con_id: connection ID string on device
 *
 * This is the main function used to create a clk pointer for use by clk
 * consumers. It connects a consumer to the clk_core and clk_hw structures
 * used by the framework and clk provider respectively.
 */
struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
			      const char *dev_id, const char *con_id)
{
	struct clk *clk;
	struct clk_core *core;

	/* This is to allow this function to be chained to others */
	if (IS_ERR_OR_NULL(hw))
		return ERR_CAST(hw);

	core = hw->core;
	clk = alloc_clk(core, dev_id, con_id);
	if (IS_ERR(clk))
		return clk;
	clk->dev = dev;

	if (!try_module_get(core->owner)) {
		free_clk(clk);
		return ERR_PTR(-ENOENT);
	}

	kref_get(&core->ref);
	clk_core_link_consumer(core, clk);

	return clk;
}

static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist)
{
	const char *dst;

	if (!src) {
		if (must_exist)
			return -EINVAL;
		return 0;
	}

	*dst_p = dst = kstrdup_const(src, GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	return 0;
}

static int clk_core_populate_parent_map(struct clk_core *core,
					const struct clk_init_data *init)
{
	u8 num_parents = init->num_parents;
	const char * const *parent_names = init->parent_names;
	const struct clk_hw **parent_hws = init->parent_hws;
	const struct clk_parent_data *parent_data = init->parent_data;
	int i, ret = 0;
	struct clk_parent_map *parents, *parent;

	if (!num_parents)
		return 0;

	/*
	 * Avoid unnecessary string look-ups of clk_core's possible parents by
	 * having a cache of names/clk_hw pointers to clk_core pointers.
	 */
	parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL);
	core->parents = parents;
	if (!parents)
		return -ENOMEM;

	/* Copy everything over because it might be __initdata */
	for (i = 0, parent = parents; i < num_parents; i++, parent++) {
		parent->index = -1;
		if (parent_names) {
			/* throw a WARN if any entries are NULL */
			WARN(!parent_names[i],
			     "%s: invalid NULL in %s's .parent_names\n",
			     __func__, core->name);
			ret = clk_cpy_name(&parent->name, parent_names[i],
					   true);
		} else if (parent_data) {
			parent->hw = parent_data[i].hw;
			parent->index = parent_data[i].index;
			ret = clk_cpy_name(&parent->fw_name,
					   parent_data[i].fw_name, false);
			if (!ret)
				ret = clk_cpy_name(&parent->name,
						   parent_data[i].name,
						   false);
		} else if (parent_hws) {
			parent->hw = parent_hws[i];
		} else {
			ret = -EINVAL;
			WARN(1, "Must specify parents if num_parents > 0\n");
		}

		if (ret) {
			do {
				kfree_const(parents[i].name);
				kfree_const(parents[i].fw_name);
			} while (--i >= 0);
			kfree(parents);

			return ret;
		}
	}

	return 0;
}

static void clk_core_free_parent_map(struct clk_core *core)
{
	int i = core->num_parents;

	if (!core->num_parents)
		return;

	while (--i >= 0) {
		kfree_const(core->parents[i].name);
		kfree_const(core->parents[i].fw_name);
	}

	kfree(core->parents);
}

static struct clk *
__clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
{
	int ret;
	struct clk_core *core;
	const struct clk_init_data *init = hw->init;

	/*
	 * The init data is not supposed to be used outside of registration path.
	 * Set it to NULL so that provider drivers can't use it either and so that
	 * we catch use of hw->init early on in the core.
	 */
	hw->init = NULL;

	core = kzalloc(sizeof(*core), GFP_KERNEL);
	if (!core) {
		ret = -ENOMEM;
		goto fail_out;
	}

	core->name = kstrdup_const(init->name, GFP_KERNEL);
	if (!core->name) {
		ret = -ENOMEM;
		goto fail_name;
	}

	if (WARN_ON(!init->ops)) {
		ret = -EINVAL;
		goto fail_ops;
	}
	core->ops = init->ops;

	if (dev && pm_runtime_enabled(dev))
		core->rpm_enabled = true;
	core->dev = dev;
	core->of_node = np;
	if (dev && dev->driver)
		core->owner = dev->driver->owner;
	core->hw = hw;
	core->flags = init->flags;
	core->num_parents = init->num_parents;
	core->min_rate = 0;
	core->max_rate = ULONG_MAX;
	hw->core = core;

	ret = clk_core_populate_parent_map(core, init);
	if (ret)
		goto fail_parents;

	INIT_HLIST_HEAD(&core->clks);

	/*
	 * Don't call clk_hw_create_clk() here because that would pin the
	 * provider module to itself and prevent it from ever being removed.
	 */
	hw->clk = alloc_clk(core, NULL, NULL);
	if (IS_ERR(hw->clk)) {
		ret = PTR_ERR(hw->clk);
		goto fail_create_clk;
	}

	clk_core_link_consumer(hw->core, hw->clk);

	ret = __clk_core_init(core);
	if (!ret)
		return hw->clk;

	clk_prepare_lock();
	clk_core_unlink_consumer(hw->clk);
	clk_prepare_unlock();

	free_clk(hw->clk);
	hw->clk = NULL;

fail_create_clk:
	clk_core_free_parent_map(core);
fail_parents:
fail_ops:
	kfree_const(core->name);
fail_name:
	kfree(core);
fail_out:
	return ERR_PTR(ret);
}

/**
 * dev_or_parent_of_node() - Get device node of @dev or @dev's parent
 * @dev: Device to get device node of
 *
 * Return: device node pointer of @dev, or the device node pointer of
 * @dev->parent if dev doesn't have a device node, or NULL if neither
 * @dev or @dev->parent have a device node.
 */
static struct device_node *dev_or_parent_of_node(struct device *dev)
{
	struct device_node *np;

	if (!dev)
		return NULL;

	np = dev_of_node(dev);
	if (!np)
		np = dev_of_node(dev->parent);

	return np;
}

/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the *deprecated* interface for populating the clock tree with
 * new clock nodes. Use clk_hw_register() instead.
 *
 * Returns: a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjunction with the
 * rest of the clock API.  In the event of an error clk_register will return an
 * error code; drivers must test for an error code after calling clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
	return __clk_register(dev, dev_or_parent_of_node(dev), hw);
}
EXPORT_SYMBOL_GPL(clk_register);

/**
 * clk_hw_register - register a clk_hw and return an error code
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_hw_register is the primary interface for populating the clock tree with
 * new clock nodes. It returns an integer equal to zero indicating success or
 * less than zero indicating failure. Drivers must test for an error code after
 * calling clk_hw_register().
 */
int clk_hw_register(struct device *dev, struct clk_hw *hw)
{
	return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev),
			       hw));
}
EXPORT_SYMBOL_GPL(clk_hw_register);
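
/*
 * Illustrative sketch (hypothetical provider, not part of this file):
 * a driver embeds a struct clk_hw, points hw->init at a filled-in
 * struct clk_init_data and registers it. "my_gate_ops" and the clock
 * names are assumptions.
 *
 *	static const struct clk_init_data init = {
 *		.name = "my_gate",
 *		.ops = &my_gate_ops,
 *		.parent_names = (const char *[]){ "osc24m" },
 *		.num_parents = 1,
 *	};
 *	static struct clk_hw my_hw = { .init = &init };
 *
 *	ret = clk_hw_register(dev, &my_hw);
 *	if (ret)
 *		return ret;
 */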

/**
 * of_clk_hw_register - register a clk_hw and return an error code
 * @node: device_node of device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * of_clk_hw_register() is the primary interface for populating the clock tree
 * with new clock nodes when a struct device is not available, but a struct
 * device_node is. It returns an integer equal to zero indicating success or
 * less than zero indicating failure. Drivers must test for an error code after
 * calling of_clk_hw_register().
 */
int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
{
	return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw));
}
EXPORT_SYMBOL_GPL(of_clk_hw_register);

/* Free memory allocated for a clock. */
static void __clk_release(struct kref *ref)
{
	struct clk_core *core = container_of(ref, struct clk_core, ref);

	lockdep_assert_held(&prepare_lock);

	clk_core_free_parent_map(core);
	kfree_const(core->name);
	kfree(core);
}

/*
 * Empty clk_ops for unregistered clocks. These are used temporarily
 * after clk_unregister() was called on a clock and until last clock
 * consumer calls clk_put() and the struct clk object is freed.
 */
static int clk_nodrv_prepare_enable(struct clk_hw *hw)
{
	return -ENXIO;
}

static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
{
	WARN_ON_ONCE(1);
}

static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	return -ENXIO;
}

static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
{
	return -ENXIO;
}

static const struct clk_ops clk_nodrv_ops = {
	.enable		= clk_nodrv_prepare_enable,
	.disable	= clk_nodrv_disable_unprepare,
	.prepare	= clk_nodrv_prepare_enable,
	.unprepare	= clk_nodrv_disable_unprepare,
	.set_rate	= clk_nodrv_set_rate,
	.set_parent	= clk_nodrv_set_parent,
};

static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
						struct clk_core *target)
{
	int i;
	struct clk_core *child;

	for (i = 0; i < root->num_parents; i++)
		if (root->parents[i].core == target)
			root->parents[i].core = NULL;

	hlist_for_each_entry(child, &root->children, child_node)
		clk_core_evict_parent_cache_subtree(child, target);
}

/* Remove this clk from all parent caches */
static void clk_core_evict_parent_cache(struct clk_core *core)
{
	struct hlist_head **lists;
	struct clk_core *root;

	lockdep_assert_held(&prepare_lock);

	for (lists = all_lists; *lists; lists++)
		hlist_for_each_entry(root, *lists, child_node)
			clk_core_evict_parent_cache_subtree(root, core);
}

/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 */
void clk_unregister(struct clk *clk)
{
	unsigned long flags;
	const struct clk_ops *ops;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_debug_unregister(clk->core);

	clk_prepare_lock();

	ops = clk->core->ops;
	if (ops == &clk_nodrv_ops) {
		pr_err("%s: unregistered clock: %s\n", __func__,
		       clk->core->name);
		goto unlock;
	}
	/*
	 * Assign empty clock ops for consumers that might still hold
	 * a reference to this clock.
	 */
	flags = clk_enable_lock();
	clk->core->ops = &clk_nodrv_ops;
	clk_enable_unlock(flags);

	if (ops->terminate)
		ops->terminate(clk->core->hw);

	if (!hlist_empty(&clk->core->children)) {
		struct clk_core *child;
		struct hlist_node *t;

		/* Reparent all children to the orphan list. */
		hlist_for_each_entry_safe(child, t, &clk->core->children,
					  child_node)
			clk_core_set_parent_nolock(child, NULL);
	}

	clk_core_evict_parent_cache(clk->core);

	hlist_del_init(&clk->core->child_node);

	if (clk->core->prepare_count)
		pr_warn("%s: unregistering prepared clock: %s\n",
			__func__, clk->core->name);

	if (clk->core->protect_count)
		pr_warn("%s: unregistering protected clock: %s\n",
			__func__, clk->core->name);

	kref_put(&clk->core->ref, __clk_release);
	free_clk(clk);
unlock:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);

/**
 * clk_hw_unregister - unregister a currently registered clk_hw
 * @hw: hardware-specific clock data to unregister
 */
void clk_hw_unregister(struct clk_hw *hw)
{
	clk_unregister(hw->clk);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister);

static void devm_clk_release(struct device *dev, void *res)
{
	clk_unregister(*(struct clk **)res);
}

static void devm_clk_hw_release(struct device *dev, void *res)
{
	clk_hw_unregister(*(struct clk_hw **)res);
}

/**
 * devm_clk_register - resource managed clk_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_register(). This function is *deprecated*, use
 * devm_clk_hw_register() instead.
 *
 * Clocks returned from this function are automatically clk_unregister()ed on
 * driver detach. See clk_register() for more information.
 */
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
	struct clk *clk;
	struct clk **clkp;

	clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
	if (!clkp)
		return ERR_PTR(-ENOMEM);

	clk = clk_register(dev, hw);
	if (!IS_ERR(clk)) {
		*clkp = clk;
		devres_add(dev, clkp);
	} else {
		devres_free(clkp);
	}

	return clk;
}
EXPORT_SYMBOL_GPL(devm_clk_register);

/**
 * devm_clk_hw_register - resource managed clk_hw_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_hw_register(). Clocks registered by this function are
 * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register()
 * for more information.
 */
int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
{
	struct clk_hw **hwp;
	int ret;

	hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL);
	if (!hwp)
		return -ENOMEM;

	ret = clk_hw_register(dev, hw);
	if (!ret) {
		*hwp = hw;
		devres_add(dev, hwp);
	} else {
		devres_free(hwp);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_clk_hw_register);
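
/*
 * Illustrative sketch: called from a probe() routine, the devres
 * variant removes the need for an explicit unregister on the error and
 * remove paths. "my_hw" is the same hypothetical clk_hw as in the
 * clk_hw_register() sketch above.
 *
 *	ret = devm_clk_hw_register(&pdev->dev, &my_hw);
 *	if (ret)
 *		return ret;
 *	(no clk_hw_unregister() is needed in the remove() callback)
 */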

static int devm_clk_match(struct device *dev, void *res, void *data)
{
	struct clk *c = res;

	if (WARN_ON(!c))
		return 0;

	return c == data;
}

static int devm_clk_hw_match(struct device *dev, void *res, void *data)
{
	struct clk_hw *hw = res;

	if (WARN_ON(!hw))
		return 0;

	return hw == data;
}

/**
 * devm_clk_unregister - resource managed clk_unregister()
 * @dev: device that is unregistering the clock data
 * @clk: clock to unregister
 *
 * Release a clock allocated with devm_clk_register(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_clk_unregister(struct device *dev, struct clk *clk)
{
	WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);

/**
 * devm_clk_hw_unregister - resource managed clk_hw_unregister()
 * @dev: device that is unregistering the hardware-specific clock data
 * @hw: link to hardware-specific clock data
 *
 * Unregister a clk_hw registered with devm_clk_hw_register(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
{
	WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match,
			       hw));
}
EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);

/*
 * clkdev helpers
 */

void __clk_put(struct clk *clk)
{
	struct module *owner;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_prepare_lock();

	/*
	 * Before calling clk_put, all calls to clk_rate_exclusive_get() from a
	 * given user should be balanced with calls to clk_rate_exclusive_put()
	 * and by that same consumer
	 */
	if (WARN_ON(clk->exclusive_count)) {
		/* We voiced our concern, let's sanitize the situation */
		clk->core->protect_count -= (clk->exclusive_count - 1);
		clk_core_rate_unprotect(clk->core);
		clk->exclusive_count = 0;
	}

	hlist_del(&clk->clks_node);
	if (clk->min_rate > clk->core->req_rate ||
	    clk->max_rate < clk->core->req_rate)
		clk_core_set_rate_nolock(clk->core, clk->core->req_rate);

	owner = clk->core->owner;
	kref_put(&clk->core->ref, __clk_release);

	clk_prepare_unlock();

	module_put(owner);

	free_clk(clk);
}

/***        clk rate change notifiers        ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes.  This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon.  The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * this will cause a nested prepare_lock mutex.
 *
 * In all notification cases (pre, post and abort rate change) the original
 * clock rate is passed to the callback via struct clk_notifier_data.old_rate
 * and the new frequency is passed via struct clk_notifier_data.new_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOMEM;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	/* search the list of notifiers for this clk */
	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	/* if clk wasn't in the notifier list, allocate new clk_notifier */
	if (cn->clk != clk) {
		cn = kzalloc(sizeof(*cn), GFP_KERNEL);
		if (!cn)
			goto out;

		cn->clk = clk;
		srcu_init_notifier_head(&cn->notifier_head);

		list_add(&cn->node, &clk_notifier_list);
	}

	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

	clk->core->notifier_count++;

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);
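
/*
 * Illustrative sketch of a notifier callback; per the note above it
 * must not call back into the top-level clk API. The driver context
 * and names are hypothetical.
 *
 *	static int my_rate_notifier(struct notifier_block *nb,
 *				    unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == PRE_RATE_CHANGE)
 *			pr_debug("rate: %lu -> %lu\n", ndata->old_rate,
 *				 ndata->new_rate);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_rate_notifier,
 *	};
 *
 *	ret = clk_notifier_register(clk, &my_nb);
 */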

/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Request no further notification for changes to 'clk' and frees memory
 * allocated in clk_notifier_register.
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn = NULL;
	int ret = -EINVAL;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	if (cn->clk == clk) {
		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

		clk->core->notifier_count--;

		/* XXX the notifier code should handle this better */
		if (!cn->notifier_head.head) {
			srcu_cleanup_notifier_head(&cn->notifier_head);
			list_del(&cn->node);
			kfree(cn);
		}

	} else {
		ret = -ENOENT;
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);

#ifdef CONFIG_OF
static void clk_core_reparent_orphans(void)
{
	clk_prepare_lock();
	clk_core_reparent_orphans_nolock();
	clk_prepare_unlock();
}

/**
 * struct of_clk_provider - Clock provider registration structure
 * @link: Entry in global list of clock providers
 * @node: Pointer to device tree node of clock provider
 * @get: Get clock callback.  Returns NULL or a struct clk for the
 *       given clock specifier
 * @get_hw: Get clk_hw callback.  Returns NULL, ERR_PTR or a
 *       struct clk_hw for the given clock specifier
 * @data: context pointer to be passed into @get callback
 */
struct of_clk_provider {
	struct list_head link;

	struct device_node *node;
	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
	struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data);
	void *data;
};

extern struct of_device_id __clk_of_table;
static const struct of_device_id __clk_of_table_sentinel
	__used __section(__clk_of_table_end);

static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_mutex);

struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
				  void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);

struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);

struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_onecell_data *clk_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= clk_data->clk_num) {
		pr_err("%s: invalid clock index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return clk_data->clks[idx];
}
EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);

struct clk_hw *
of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_hw_onecell_data *hw_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= hw_data->num) {
		pr_err("%s: invalid index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return hw_data->hws[idx];
}
EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);

/**
 * of_clk_add_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @clk_src_get: callback for decoding clock
 * @data: context pointer for @clk_src_get callback.
 *
 * This function is *deprecated*. Use of_clk_add_hw_provider() instead.
 */
int of_clk_add_provider(struct device_node *np,
			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
						   void *data),
			void *data)
{
	struct of_clk_provider *cp;
	int ret;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->get = clk_src_get;

	mutex_lock(&of_clk_mutex);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_mutex);
	pr_debug("Added clock from %pOF\n", np);

	clk_core_reparent_orphans();

	ret = of_clk_set_defaults(np, true);
	if (ret < 0)
		of_clk_del_provider(np);

	return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);

/**
 * of_clk_add_hw_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @get: callback for decoding clk_hw
 * @data: context pointer for @get callback.
 */
int of_clk_add_hw_provider(struct device_node *np,
			   struct clk_hw *(*get)(struct of_phandle_args *clkspec,
						 void *data),
			   void *data)
{
	struct of_clk_provider *cp;
	int ret;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->get_hw = get;

	mutex_lock(&of_clk_mutex);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_mutex);
	pr_debug("Added clk_hw provider from %pOF\n", np);

	clk_core_reparent_orphans();

	ret = of_clk_set_defaults(np, true);
	if (ret < 0)
		of_clk_del_provider(np);

	return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
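
/*
 * Illustrative sketch: a provider with several outputs typically pairs
 * this function with of_clk_hw_onecell_get() and a clk_hw_onecell_data
 * table indexed by the consumer's clock specifier. The table size and
 * the clk_hw names are assumptions.
 *
 *	struct clk_hw_onecell_data *data;
 *
 *	data = kzalloc(struct_size(data, hws, 2), GFP_KERNEL);
 *	if (!data)
 *		return -ENOMEM;
 *	data->num = 2;
 *	data->hws[0] = &pll_hw;
 *	data->hws[1] = &gate_hw;
 *	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, data);
 */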

static void devm_of_clk_release_provider(struct device *dev, void *res)
{
	of_clk_del_provider(*(struct device_node **)res);
}

/*
 * A child device may use its parent's device tree node as the clock
 * provider node, e.g. for MFD sub-devices: if @dev's own node has no
 * "#clock-cells" property but the parent's node does, the parent node
 * is used for provider registration.
 */
static struct device_node *get_clk_provider_node(struct device *dev)
{
	struct device_node *np, *parent_np;

	np = dev->of_node;
	parent_np = dev->parent ? dev->parent->of_node : NULL;

	if (!of_find_property(np, "#clock-cells", NULL))
		if (of_find_property(parent_np, "#clock-cells", NULL))
			np = parent_np;

	return np;
}

/**
 * devm_of_clk_add_hw_provider() - Managed clk provider node registration
 * @dev: Device acting as the clock provider (used for DT node and lifetime)
 * @get: callback for decoding clk_hw
 * @data: context pointer for @get callback
 *
 * Registers a clock provider for the given device's node. If the device's
 * node lacks clock provider information (#clock-cells), the parent device's
 * node is used instead (see get_clk_provider_node()). The provider is
 * automatically removed on driver detach.
 *
 * Return: 0 on success or an errno on failure.
 */
int devm_of_clk_add_hw_provider(struct device *dev,
				struct clk_hw *(*get)(struct of_phandle_args *clkspec,
						      void *data),
				void *data)
{
	struct device_node **ptr, *np;
	int ret;

	ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	np = get_clk_provider_node(dev);
	ret = of_clk_add_hw_provider(np, get, data);
	if (!ret) {
		*ptr = np;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider);

/**
 * of_clk_del_provider() - Remove a previously registered clock provider
 * @np: Device node pointer associated with clock provider
 */
void of_clk_del_provider(struct device_node *np)
{
	struct of_clk_provider *cp;

	mutex_lock(&of_clk_mutex);
	list_for_each_entry(cp, &of_clk_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_clk_mutex);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);

static int devm_clk_provider_match(struct device *dev, void *res, void *data)
{
	struct device_node **np = res;

	if (WARN_ON(!np || !*np))
		return 0;

	return *np == data;
}

/**
 * devm_of_clk_del_provider() - Remove clock provider registered using devm
 * @dev: Device to whose lifetime the clock provider was bound
 */
void devm_of_clk_del_provider(struct device *dev)
{
	int ret;
	struct device_node *np = get_clk_provider_node(dev);

	ret = devres_release(dev, devm_of_clk_release_provider,
			     devm_clk_provider_match, np);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_of_clk_del_provider);

/**
 * of_parse_clkspec() - Parse a DT clock specifier for a given device node
 * @np: device node to parse clock specifier from
 * @index: index of phandle to parse clock out of. If index < 0, @name is used
 * @name: clock name to find and parse. If name is NULL, the index is used
 * @out_args: Result of parsing the clock specifier
 *
 * Parses a device node's "clocks" and "clock-names" properties to find the
 * phandle and cells for the index or name that is desired. The resulting clock
 * specifier is placed into @out_args, or an errno is returned when there's a
 * parsing error. The @index argument is ignored if @name is non-NULL.
 *
 * Example:
 *
 * phandle1: clock-controller@1 {
 *	#clock-cells = <2>;
 * }
 *
 * phandle2: clock-controller@2 {
 *	#clock-cells = <1>;
 * }
 *
 * clock-consumer@3 {
 *	clocks = <&phandle1 1 2 &phandle2 3>;
 *	clock-names = "name1", "name2";
 * }
 *
 * To get a device_node for the `clock-controller@2' node you may call this
 * function a few different ways:
 *
 *   of_parse_clkspec(clock-consumer@3, -1, "name2", &args);
 *   of_parse_clkspec(clock-consumer@3, 1, NULL, &args);
 *   of_parse_clkspec(clock-consumer@3, 1, "name2", &args);
 *
 * Return: 0 upon successfully parsing the clock specifier, or a negative
 * errno (e.g. -ENOENT) when the specifier cannot be found or parsed.
 */
static int of_parse_clkspec(const struct device_node *np, int index,
			    const char *name, struct of_phandle_args *out_args)
{
	int ret = -ENOENT;

	/* Walk up the tree of devices looking for a clock property that matches */
	while (np) {
		/*
		 * For named clocks, first look up the name in the
		 * "clock-names" property.  If it cannot be found, then index
		 * will be an error code and of_parse_phandle_with_args() will
		 * return -EINVAL.
		 */
		if (name)
			index = of_property_match_string(np, "clock-names", name);
		ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
						 index, out_args);
		if (!ret)
			break;
		if (name && index >= 0)
			break;

		/*
		 * No matching clock found on this node.  If the parent node
		 * has a "clock-ranges" property, then we can try one of its
		 * clocks.
		 */
		np = np->parent;
		if (np && !of_get_property(np, "clock-ranges", NULL))
			break;
		index = 0;
	}

	return ret;
}

static struct clk_hw *
__of_clk_get_hw_from_provider(struct of_clk_provider *provider,
			      struct of_phandle_args *clkspec)
{
	struct clk *clk;

	if (provider->get_hw)
		return provider->get_hw(clkspec, provider->data);

	clk = provider->get(clkspec, provider->data);
	if (IS_ERR(clk))
		return ERR_CAST(clk);
	return __clk_get_hw(clk);
}

static struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
{
	struct of_clk_provider *provider;
	struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);

	if (!clkspec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&of_clk_mutex);
	list_for_each_entry(provider, &of_clk_providers, link) {
		if (provider->node == clkspec->np) {
			hw = __of_clk_get_hw_from_provider(provider, clkspec);
			if (!IS_ERR(hw))
				break;
		}
	}
	mutex_unlock(&of_clk_mutex);

	return hw;
}

/**
 * of_clk_get_from_provider() - Lookup a clock from a clock provider
 * @clkspec: pointer to a clock specifier data structure
 *
 * This function looks up a struct clk from the registered list of clock
 * providers, an input is a clock specifier data structure as returned
 * from the of_parse_phandle_with_args() function call.
 */
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec);

	return clk_hw_create_clk(NULL, hw, NULL, __func__);
}
EXPORT_SYMBOL_GPL(of_clk_get_from_provider);

struct clk_hw *of_clk_get_hw(struct device_node *np, int index,
			     const char *con_id)
{
	int ret;
	struct clk_hw *hw;
	struct of_phandle_args clkspec;

	ret = of_parse_clkspec(np, index, con_id, &clkspec);
	if (ret)
		return ERR_PTR(ret);

	hw = of_clk_get_hw_from_clkspec(&clkspec);
	of_node_put(clkspec.np);

	return hw;
}

static struct clk *__of_clk_get(struct device_node *np,
				int index, const char *dev_id,
				const char *con_id)
{
	struct clk_hw *hw = of_clk_get_hw(np, index, con_id);

	return clk_hw_create_clk(NULL, hw, dev_id, con_id);
}

struct clk *of_clk_get(struct device_node *np, int index)
{
	return __of_clk_get(np, index, np->full_name, NULL);
}
EXPORT_SYMBOL(of_clk_get);

/**
 * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
 * @np: pointer to clock consumer node
 * @name: name of consumer's clock input, or NULL for the first clock reference
 *
 * This function parses the clocks and clock-names properties,
 * and uses them to look up the struct clk from the registered list of clock
 * providers.
 */
struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
{
	if (!np)
		return ERR_PTR(-ENOENT);

	return __of_clk_get(np, 0, np->full_name, name);
}
EXPORT_SYMBOL(of_clk_get_by_name);
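
/*
 * Illustrative sketch: with a hypothetical consumer node such as
 *
 *	uart0: serial@1000 {
 *		clocks = <&pll 1>, <&osc>;
 *		clock-names = "baud", "register";
 *	};
 *
 * a driver can look up the second clock by name:
 *
 *	struct clk *clk = of_clk_get_by_name(np, "register");
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */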

/**
 * of_clk_get_parent_count() - Count the number of clocks a device node has
 * @np: device node to count
 *
 * Returns: The number of clocks that are possible parents of this node
 */
unsigned int of_clk_get_parent_count(const struct device_node *np)
{
	int count;

	count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
	if (count < 0)
		return 0;

	return count;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_count);

const char *of_clk_get_parent_name(const struct device_node *np, int index)
{
	struct of_phandle_args clkspec;
	struct property *prop;
	const char *clk_name;
	const __be32 *vp;
	u32 pv;
	int rc;
	int count;
	struct clk *clk;

	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
					&clkspec);
	if (rc)
		return NULL;

	index = clkspec.args_count ? clkspec.args[0] : 0;
	count = 0;

	/*
	 * if there is an indices property, use it to transfer the index
	 * specified into an array offset for the clock-output-names property.
	 */
	of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
		if (index == pv) {
			index = count;
			break;
		}
		count++;
	}
	/* We went off the end of 'clock-indices' without finding it */
	if (prop && !vp)
		return NULL;

	if (of_property_read_string_index(clkspec.np, "clock-output-names",
					  index,
					  &clk_name) < 0) {
		/*
		 * Best effort to get the name if the clock has been
		 * registered with the framework. If the clock isn't
		 * registered, we return the node name as the name of
		 * the clock as long as #clock-cells = 0.
		 */
		clk = of_clk_get_from_provider(&clkspec);
		if (IS_ERR(clk)) {
			if (clkspec.args_count == 0)
				clk_name = clkspec.np->name;
			else
				clk_name = NULL;
		} else {
			clk_name = __clk_get_name(clk);
			clk_put(clk);
		}
	}

	of_node_put(clkspec.np);
	return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);

/**
 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
 * number of parents
 * @np: Device node pointer associated with clock provider
 * @parents: pointer to char array that hold the parents' names
 * @size: size of the @parents array
 *
 * Return: number of parents for the clock node.
 */
int of_clk_parent_fill(struct device_node *np, const char **parents,
		       unsigned int size)
{
	unsigned int i = 0;

	while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
		i++;

	return i;
}
EXPORT_SYMBOL_GPL(of_clk_parent_fill);
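
/*
 * Illustrative sketch: providers commonly size the array from
 * of_clk_get_parent_count() and then let of_clk_parent_fill() populate
 * it. The variable names are assumptions.
 *
 *	unsigned int num = of_clk_get_parent_count(np);
 *	const char **parents;
 *
 *	parents = kcalloc(num, sizeof(*parents), GFP_KERNEL);
 *	if (parents)
 *		of_clk_parent_fill(np, parents, num);
 */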

struct clock_provider {
	void (*clk_init_cb)(struct device_node *);
	struct device_node *np;
	struct list_head node;
};

/*
 * This function looks for a parent clock. If there is one, then it
 * checks that the provider for this parent clock was initialized, in
 * this case the parent clock will be ready.
 */
static int parent_ready(struct device_node *np)
{
	int i = 0;

	while (true) {
		struct clk *clk = of_clk_get(np, i);

		/* this parent is ready we can check the next one */
		if (!IS_ERR(clk)) {
			clk_put(clk);
			i++;
			continue;
		}

		/* at least one parent is not ready, we exit now */
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return 0;

		/*
		 * Here we make the assumption that the device tree is
		 * written correctly. So an error means that there is
		 * no more parent. As we didn't exit yet, the
		 * previous parents are ready. If there is no clock
		 * parent, no need to wait for them, so we can
		 * consider their absence as being ready.
		 */
		return 1;
	}
}

/**
 * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
 * @np: Device node pointer associated with clock provider
 * @index: clock index
 * @flags: pointer to top-level framework flags
 *
 * Detects if the clock-critical property exists and, if so, sets the
 * corresponding CLK_IS_CRITICAL flag.
 *
 * Do not use this function. It exists only for legacy Device Tree
 * bindings, such as the one-clock-per-node style that are outdated.
 * Those bindings typically put all clock data into .dts and the Linux
 * driver has no clock data, thus making it impossible to set this flag
 * correctly from the driver. Only those drivers may call
 * of_clk_detect_critical from their setup functions.
 *
 * Return: error code or zero on success
 */
int of_clk_detect_critical(struct device_node *np, int index,
			   unsigned long *flags)
{
	struct property *prop;
	const __be32 *cur;
	uint32_t idx;

	if (!np || !flags)
		return -EINVAL;

	of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
		if (index == idx)
			*flags |= CLK_IS_CRITICAL;

	return 0;
}

/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers.
 *
 * This function scans the device tree for matching clock providers
 * and calls their initialization functions. It also does so by trying
 * to follow the dependencies.
 */
void __init of_clk_init(const struct of_device_id *matches)
{
	const struct of_device_id *match;
	struct device_node *np;
	struct clock_provider *clk_provider, *next;
	bool is_init_done;
	bool force = false;
	LIST_HEAD(clk_provider_list);

	if (!matches)
		matches = &__clk_of_table;

	/* First prepare the list of the clocks providers */
	for_each_matching_node_and_match(np, matches, &match) {
		struct clock_provider *parent;

		if (!of_device_is_available(np))
			continue;

		parent = kzalloc(sizeof(*parent), GFP_KERNEL);
		if (!parent) {
			list_for_each_entry_safe(clk_provider, next,
						 &clk_provider_list, node) {
				list_del(&clk_provider->node);
				of_node_put(clk_provider->np);
				kfree(clk_provider);
			}
			of_node_put(np);
			return;
		}

		parent->clk_init_cb = match->data;
		parent->np = of_node_get(np);
		list_add_tail(&parent->node, &clk_provider_list);
	}

	while (!list_empty(&clk_provider_list)) {
		is_init_done = false;
		list_for_each_entry_safe(clk_provider, next,
					 &clk_provider_list, node) {
			if (force || parent_ready(clk_provider->np)) {

				/* Don't populate platform devices */
				of_node_set_flag(clk_provider->np,
						 OF_POPULATED);

				clk_provider->clk_init_cb(clk_provider->np);
				of_clk_set_defaults(clk_provider->np, true);

				list_del(&clk_provider->node);
				of_node_put(clk_provider->np);
				kfree(clk_provider);
				is_init_done = true;
			}
		}

		/*
		 * We didn't manage to initialize any of the
		 * remaining providers during the last loop, so now we
		 * initialize all the remaining ones unconditionally
		 * in case the clock parent was not mandatory
		 */
		if (!is_init_done)
			force = true;
	}
}
#endif