// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Google, Inc
 * Written by Simon Glass <sjg@chromium.org>
 * Copyright (c) 2016, NVIDIA CORPORATION.
 * Copyright (c) 2018, Theobroma Systems Design und Consulting GmbH
 */

#define LOG_CATEGORY UCLASS_CLK

#include <common.h>
#include <clk.h>
#include <clk-uclass.h>
#include <dm.h>
#include <dt-structs.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <asm/global_data.h>
#include <dm/device_compat.h>
#include <dm/device-internal.h>
#include <dm/devres.h>
#include <dm/read.h>
#include <linux/bug.h>
#include <linux/clk-provider.h>
#include <linux/err.h>

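/* Fetch the clk_ops implemented by a clock provider device's driver */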
static inline const struct clk_ops *clk_dev_ops(struct udevice *dev)
{
	return (const struct clk_ops *)dev->driver->ops;
}

struct clk *dev_get_clk_ptr(struct udevice *dev)
{
	return (struct clk *)dev_get_uclass_priv(dev);
}

#if CONFIG_IS_ENABLED(OF_PLATDATA)
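/*
 * With OF_PLATDATA there is no runtime device tree: clock references come
 * from dtoc-generated phandle_1_arg cells, so look up the provider by its
 * of-platdata index and take the single argument cell as the clock ID.
 */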
int clk_get_by_phandle(struct udevice *dev, const struct phandle_1_arg *cells,
		       struct clk *clk)
{
	int ret;

	ret = device_get_by_ofplat_idx(cells->idx, &clk->dev);
	if (ret)
		return ret;
	clk->id = cells->arg[0];

	return 0;
}
#endif

#if CONFIG_IS_ENABLED(OF_REAL)
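/*
 * Default clock-specifier translation: cell 0 (when present) becomes the
 * clock ID and clk->data is cleared. Providers whose #clock-cells encode
 * more than one cell must supply their own of_xlate() operation.
 */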
static int clk_of_xlate_default(struct clk *clk,
				struct ofnode_phandle_args *args)
{
	debug("%s(clk=%p)\n", __func__, clk);

	if (args->args_count > 1) {
		debug("Invalid args_count: %d\n", args->args_count);
		return -EINVAL;
	}

	if (args->args_count)
		clk->id = args->args[0];
	else
		clk->id = 0;

	clk->data = 0;

	return 0;
}

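/*
 * Common tail of the clk_get_by_index*() lookups: given the parsed phandle
 * arguments, find the provider device in UCLASS_CLK, translate the specifier
 * into clk->id/clk->data and request the clock from the provider.
 */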
static int clk_get_by_index_tail(int ret, ofnode node,
				 struct ofnode_phandle_args *args,
				 const char *list_name, int index,
				 struct clk *clk)
{
	struct udevice *dev_clk;
	const struct clk_ops *ops;

	assert(clk);
	clk->dev = NULL;
	if (ret)
		goto err;

	ret = uclass_get_device_by_ofnode(UCLASS_CLK, args->node, &dev_clk);
	if (ret) {
		debug("%s: uclass_get_device_by_ofnode() failed: err=%d\n",
		      __func__, ret);
		return log_msg_ret("get", ret);
	}

	clk->dev = dev_clk;

	ops = clk_dev_ops(dev_clk);

	if (ops->of_xlate)
		ret = ops->of_xlate(clk, args);
	else
		ret = clk_of_xlate_default(clk, args);
	if (ret) {
		debug("of_xlate() failed: %d\n", ret);
		return log_msg_ret("xlate", ret);
	}

	return clk_request(dev_clk, clk);
err:
	debug("%s: Node '%s', property '%s', failed to request CLK index %d: %d\n",
	      __func__, ofnode_get_name(node), list_name, index, ret);

	return log_msg_ret("prop", ret);
}

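/*
 * Parse entry @index of the phandle list @prop_name (e.g. "clocks" or
 * "assigned-clocks") on @dev and resolve it into @clk.
 */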
static int clk_get_by_indexed_prop(struct udevice *dev, const char *prop_name,
				   int index, struct clk *clk)
{
	int ret;
	struct ofnode_phandle_args args;

	debug("%s(dev=%p, index=%d, clk=%p)\n", __func__, dev, index, clk);

	assert(clk);
	clk->dev = NULL;

	ret = dev_read_phandle_with_args(dev, prop_name, "#clock-cells", 0,
					 index, &args);
	if (ret) {
		debug("%s: dev_read_phandle_with_args() failed: err=%d\n",
		      __func__, ret);
		return log_ret(ret);
	}

	return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, prop_name,
				     index, clk);
}

int clk_get_by_index(struct udevice *dev, int index, struct clk *clk)
{
	struct ofnode_phandle_args args;
	int ret;

	ret = dev_read_phandle_with_args(dev, "clocks", "#clock-cells", 0,
					 index, &args);

	return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
				     index, clk);
}

int clk_get_by_index_nodev(ofnode node, int index, struct clk *clk)
{
	struct ofnode_phandle_args args;
	int ret;

	ret = ofnode_parse_phandle_with_args(node, "clocks", "#clock-cells", 0,
					     index, &args);

	return clk_get_by_index_tail(ret, node, &args, "clocks",
				     index, clk);
}

int clk_get_bulk(struct udevice *dev, struct clk_bulk *bulk)
{
	int i, ret, err, count;

	bulk->count = 0;

	count = dev_count_phandle_with_args(dev, "clocks", "#clock-cells", 0);
	if (count < 1)
		return count;

	bulk->clks = devm_kcalloc(dev, count, sizeof(struct clk), GFP_KERNEL);
	if (!bulk->clks)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		ret = clk_get_by_index(dev, i, &bulk->clks[i]);
		if (ret < 0)
			goto bulk_get_err;

		++bulk->count;
	}

	return 0;

bulk_get_err:
	err = clk_release_all(bulk->clks, bulk->count);
	if (err)
		debug("%s: could not release all clocks for %p\n",
		      __func__, dev);

	return ret;
}

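/*
 * With CCF enabled each clock is registered as its own udevice, so resolve
 * the clk that was registered under this ID; without CCF just return the clk
 * that was passed in.
 */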
static struct clk *clk_set_default_get_by_id(struct clk *clk)
{
	struct clk *c = clk;

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		int ret = clk_get_by_id(clk->id, &c);

		if (ret) {
			debug("%s(): could not get parent clock pointer, id %lu\n",
			      __func__, clk->id);
			return ERR_PTR(ret);
		}
	}

	return c;
}

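/*
 * Handle the assigned-clock-parents property: reparent each entry of
 * assigned-clocks to the corresponding entry of assigned-clock-parents.
 * An illustrative consumer node (hypothetical provider/ID names):
 *
 *	assigned-clocks = <&cru SCLK_FOO>;
 *	assigned-clock-parents = <&cru PLL_BAR>;
 */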
static int clk_set_default_parents(struct udevice *dev,
				   enum clk_defaults_stage stage)
{
	struct clk clk, parent_clk, *c, *p;
	int index;
	int num_parents;
	int ret;

	num_parents = dev_count_phandle_with_args(dev, "assigned-clock-parents",
						  "#clock-cells", 0);
	if (num_parents < 0) {
		debug("%s: could not read assigned-clock-parents for %p\n",
		      __func__, dev);
		return 0;
	}

	for (index = 0; index < num_parents; index++) {
		ret = clk_get_by_indexed_prop(dev, "assigned-clock-parents",
					      index, &parent_clk);
		/* -ENOENT means an empty entry: leave this clock's parent alone */
		if (ret == -ENOENT)
			continue;

		if (ret) {
			debug("%s: could not get parent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		p = clk_set_default_get_by_id(&parent_clk);
		if (IS_ERR(p))
			return PTR_ERR(p);

		ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
					      index, &clk);
		/*
		 * If the clock provider is not ready yet, let it handle
		 * the re-programming later.
		 */
		if (ret == -EPROBE_DEFER) {
			ret = 0;
			continue;
		}

		if (ret) {
			debug("%s: could not get assigned clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		/*
		 * A clock provider trying to reparent itself cannot do so
		 * before it has been probed; defer it to the POST stage.
		 */
		if (stage == CLK_DEFAULTS_PRE && clk.dev == dev)
			continue;

		if (stage != CLK_DEFAULTS_PRE && clk.dev != dev)
			/* do not set up the parent clocks twice */
			continue;

		c = clk_set_default_get_by_id(&clk);
		if (IS_ERR(c))
			return PTR_ERR(c);

		ret = clk_set_parent(c, p);

		/*
		 * Not all drivers support clock reparenting (as of now);
		 * ignore errors due to this.
		 */
		if (ret == -ENOSYS)
			continue;

		if (ret < 0) {
			debug("%s: failed to reparent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}
	}

	return 0;
}

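/*
 * Handle the assigned-clock-rates property: set each listed rate on the
 * corresponding entry of assigned-clocks, skipping entries whose rate is 0.
 * An illustrative consumer node (hypothetical provider/ID names):
 *
 *	assigned-clocks = <&cru SCLK_FOO>;
 *	assigned-clock-rates = <24000000>;
 */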
static int clk_set_default_rates(struct udevice *dev,
				 enum clk_defaults_stage stage)
{
	struct clk clk, *c;
	int index;
	int num_rates;
	int size;
	int ret = 0;
	u32 *rates = NULL;

	size = dev_read_size(dev, "assigned-clock-rates");
	if (size < 0)
		return 0;

	num_rates = size / sizeof(u32);
	rates = calloc(num_rates, sizeof(u32));
	if (!rates)
		return -ENOMEM;

	ret = dev_read_u32_array(dev, "assigned-clock-rates", rates, num_rates);
	if (ret)
		goto fail;

	for (index = 0; index < num_rates; index++) {
		/* If 0 is passed, this is a no-op */
		if (!rates[index])
			continue;

		ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
					      index, &clk);
		/*
		 * If the clock provider is not ready yet, let it handle
		 * the re-programming later.
		 */
		if (ret == -EPROBE_DEFER) {
			ret = 0;
			continue;
		}

		if (ret) {
			dev_dbg(dev,
				"could not get assigned clock %d (err = %d)\n",
				index, ret);
			continue;
		}

		/*
		 * A clock provider trying to program itself cannot do so
		 * before it has been probed; defer it to the POST stage.
		 */
		if (stage == CLK_DEFAULTS_PRE && clk.dev == dev)
			continue;

		if (stage != CLK_DEFAULTS_PRE && clk.dev != dev)
			/* do not set up the defaults twice */
			continue;

		c = clk_set_default_get_by_id(&clk);
		if (IS_ERR(c)) {
			ret = PTR_ERR(c);
			goto fail;
		}

		ret = clk_set_rate(c, rates[index]);

		if (ret < 0) {
			dev_warn(dev,
				 "failed to set rate on clock index %d (%ld) (error = %d)\n",
				 index, clk.id, ret);
			break;
		}
	}

fail:
	free(rates);
	return ret;
}

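/*
 * Apply the assigned-clock-parents and assigned-clock-rates defaults from
 * @dev's node. CLK_DEFAULTS_PRE runs before the device is probed and
 * CLK_DEFAULTS_POST afterwards; CLK_DEFAULTS_POST_FORCE additionally applies
 * them even before relocation.
 */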
int clk_set_defaults(struct udevice *dev, enum clk_defaults_stage stage)
{
	int ret;

	if (!dev_has_ofnode(dev))
		return 0;

	/*
	 * To avoid setting defaults twice, don't set them before relocation.
	 * However, still set them for SPL, and still set them if explicitly
	 * asked to.
	 */
	if (!(IS_ENABLED(CONFIG_SPL_BUILD) || (gd->flags & GD_FLG_RELOC)))
		if (stage != CLK_DEFAULTS_POST_FORCE)
			return 0;

	debug("%s(%s)\n", __func__, dev_read_name(dev));

	ret = clk_set_default_parents(dev, stage);
	if (ret)
		return ret;

	ret = clk_set_default_rates(dev, stage);
	if (ret < 0)
		return ret;

	return 0;
}

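/*
 * Look up a clock by the name it is given in the clock-names list, e.g. for
 * an illustrative consumer node (hypothetical provider/ID names):
 *
 *	clocks = <&cru SCLK_UART0>, <&cru PCLK_UART0>;
 *	clock-names = "baud", "bus";
 *
 * clk_get_by_name(dev, "baud", &clk) resolves the first entry of "clocks".
 */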
int clk_get_by_name(struct udevice *dev, const char *name, struct clk *clk)
{
	int index;

	debug("%s(dev=%p, name=%s, clk=%p)\n", __func__, dev, name, clk);
	clk->dev = NULL;

	index = dev_read_stringlist_search(dev, "clock-names", name);
	if (index < 0) {
		debug("dev_read_stringlist_search() failed: %d\n", index);
		return index;
	}

	return clk_get_by_index(dev, index, clk);
}
#endif

int clk_get_by_name_nodev(ofnode node, const char *name, struct clk *clk)
{
	int index;

	debug("%s(node=%s, name=%s, clk=%p)\n", __func__,
	      ofnode_get_name(node), name, clk);
	clk->dev = NULL;

	index = ofnode_stringlist_search(node, "clock-names", name);
	if (index < 0) {
		debug("ofnode_stringlist_search() failed: %d\n", index);
		return index;
	}

	return clk_get_by_index_nodev(node, index, clk);
}

int clk_release_all(struct clk *clk, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		debug("%s(clk[%d]=%p)\n", __func__, i, &clk[i]);

		/* check if the clock has been previously requested */
		if (!clk[i].dev)
			continue;

		ret = clk_disable(&clk[i]);
		if (ret && ret != -ENOSYS)
			return ret;

		ret = clk_free(&clk[i]);
		if (ret && ret != -ENOSYS)
			return ret;
	}

	return 0;
}

int clk_request(struct udevice *dev, struct clk *clk)
{
	const struct clk_ops *ops;

	debug("%s(dev=%p, clk=%p)\n", __func__, dev, clk);
	if (!clk)
		return 0;
	ops = clk_dev_ops(dev);

	clk->dev = dev;

	if (!ops->request)
		return 0;

	return ops->request(clk);
}

int clk_free(struct clk *clk)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->rfree)
		return 0;

	return ops->rfree(clk);
}

ulong clk_get_rate(struct clk *clk)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->get_rate)
		return -ENOSYS;

	/* Return the rate directly; routing it through an int would truncate it */
	return ops->get_rate(clk);
}

struct clk *clk_get_parent(struct clk *clk)
{
	struct udevice *pdev;
	struct clk *pclk;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return NULL;

	pdev = dev_get_parent(clk->dev);
	if (!pdev)
		return ERR_PTR(-ENODEV);
	pclk = dev_get_clk_ptr(pdev);
	if (!pclk)
		return ERR_PTR(-ENODEV);

	return pclk;
}

long long clk_get_parent_rate(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *pclk;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;

	pclk = clk_get_parent(clk);
	if (IS_ERR(pclk))
		return -ENODEV;

	ops = clk_dev_ops(pclk->dev);
	if (!ops->get_rate)
		return -ENOSYS;

	/* Read the rate if it is not cached yet or if caching is disabled */
	if (!pclk->rate || pclk->flags & CLK_GET_RATE_NOCACHE)
		pclk->rate = clk_get_rate(pclk);

	return pclk->rate;
}

ulong clk_round_rate(struct clk *clk, ulong rate)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
	if (!clk_valid(clk))
		return 0;

	ops = clk_dev_ops(clk->dev);
	if (!ops->round_rate)
		return -ENOSYS;

	return ops->round_rate(clk, rate);
}

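/*
 * Recursively invalidate the cached ->rate of @clk and of the clocks on all
 * child devices, so that subsequent queries (e.g. clk_get_parent_rate())
 * re-read the rate from the driver instead of reusing a stale value.
 */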
static void clk_clean_rate_cache(struct clk *clk)
{
	struct udevice *child_dev;
	struct clk *clkp;

	if (!clk)
		return;

	clk->rate = 0;

	list_for_each_entry(child_dev, &clk->dev->child_head, sibling_node) {
		clkp = dev_get_clk_ptr(child_dev);
		clk_clean_rate_cache(clkp);
	}
}

ulong clk_set_rate(struct clk *clk, ulong rate)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->set_rate)
		return -ENOSYS;

	/* Clean up cached rates for us and all child clocks */
	clk_clean_rate_cache(clk);

	return ops->set_rate(clk, rate);
}

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	const struct clk_ops *ops;
	int ret;

	debug("%s(clk=%p, parent=%p)\n", __func__, clk, parent);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->set_parent)
		return -ENOSYS;

	ret = ops->set_parent(clk, parent);
	if (ret)
		return ret;

	if (CONFIG_IS_ENABLED(CLK_CCF))
		ret = device_reparent(clk->dev, parent->dev);

	return ret;
}

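/*
 * With CCF, enabling is reference-counted per clock device and propagated to
 * the parent clock before the clock itself is switched on; without CCF the
 * provider's ->enable op is called directly.
 */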
int clk_enable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		/* Take id 0 as a non-valid clk, such as dummy */
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			if (clkp->enable_count) {
				clkp->enable_count++;
				return 0;
			}
			if (clkp->dev->parent &&
			    device_get_uclass_id(clkp->dev->parent) == UCLASS_CLK) {
				ret = clk_enable(dev_get_clk_ptr(clkp->dev->parent));
				if (ret) {
					printf("Enable %s failed\n",
					       clkp->dev->parent->name);
					return ret;
				}
			}
		}

		if (ops->enable) {
			ret = ops->enable(clk);
			if (ret) {
				printf("Enable %s failed\n", clk->dev->name);
				return ret;
			}
		}
		if (clkp)
			clkp->enable_count++;
	} else {
		if (!ops->enable)
			return -ENOSYS;
		return ops->enable(clk);
	}

	return 0;
}

int clk_enable_bulk(struct clk_bulk *bulk)
{
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = clk_enable(&bulk->clks[i]);
		if (ret < 0 && ret != -ENOSYS)
			return ret;
	}

	return 0;
}

int clk_disable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			if (clkp->flags & CLK_IS_CRITICAL)
				return 0;

			if (clkp->enable_count == 0) {
				printf("clk %s already disabled\n",
				       clkp->dev->name);
				return 0;
			}

			if (--clkp->enable_count > 0)
				return 0;
		}

		if (ops->disable) {
			ret = ops->disable(clk);
			if (ret)
				return ret;
		}

		if (clkp && clkp->dev->parent &&
		    device_get_uclass_id(clkp->dev->parent) == UCLASS_CLK) {
			ret = clk_disable(dev_get_clk_ptr(clkp->dev->parent));
			if (ret) {
				printf("Disable %s failed\n",
				       clkp->dev->parent->name);
				return ret;
			}
		}
	} else {
		if (!ops->disable)
			return -ENOSYS;

		return ops->disable(clk);
	}

	return 0;
}

int clk_disable_bulk(struct clk_bulk *bulk)
{
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = clk_disable(&bulk->clks[i]);
		if (ret < 0 && ret != -ENOSYS)
			return ret;
	}

	return 0;
}

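/*
 * Scan every device in UCLASS_CLK for a clock whose uclass-private struct clk
 * carries the requested ID; used by the CCF code, where each clock is
 * registered as its own udevice.
 */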
int clk_get_by_id(ulong id, struct clk **clkp)
{
	struct udevice *dev;
	struct uclass *uc;
	int ret;

	ret = uclass_get(UCLASS_CLK, &uc);
	if (ret)
		return ret;

	uclass_foreach_dev(dev, uc) {
		struct clk *clk = dev_get_clk_ptr(dev);

		if (clk && clk->id == id) {
			*clkp = clk;
			return 0;
		}
	}

	return -ENOENT;
}

bool clk_is_match(const struct clk *p, const struct clk *q)
{
	/* trivial case: identical struct clk's or both NULL */
	if (p == q)
		return true;

	/* trivial case #2: one of the two pointers is NULL */
	if (!p || !q)
		return false;

	/* same device, id and data */
	if (p->dev == q->dev && p->id == q->id && p->data == q->data)
		return true;

	return false;
}

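/* devres-managed clock handles: released via clk_free() when @dev goes away */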
static void devm_clk_release(struct udevice *dev, void *res)
{
	clk_free(res);
}

static int devm_clk_match(struct udevice *dev, void *res, void *data)
{
	return res == data;
}

struct clk *devm_clk_get(struct udevice *dev, const char *id)
{
	int rc;
	struct clk *clk;

	clk = devres_alloc(devm_clk_release, sizeof(struct clk), __GFP_ZERO);
	if (unlikely(!clk))
		return ERR_PTR(-ENOMEM);

	rc = clk_get_by_name(dev, id, clk);
	if (rc) {
		devres_free(clk);
		return ERR_PTR(rc);
	}

	devres_add(dev, clk);
	return clk;
}

void devm_clk_put(struct udevice *dev, struct clk *clk)
{
	int rc;

	if (!clk)
		return;

	rc = devres_release(dev, devm_clk_release, devm_clk_match, clk);
	WARN_ON(rc);
}

int clk_uclass_post_probe(struct udevice *dev)
{
	/*
	 * Re-apply the DT-provided defaults (assigned-clock-parents and
	 * assigned-clock-rates) once the clock provider itself has been
	 * probed, covering the cases deferred at the CLK_DEFAULTS_PRE stage.
	 */
	clk_set_defaults(dev, CLK_DEFAULTS_POST);

	return 0;
}

UCLASS_DRIVER(clk) = {
	.id		= UCLASS_CLK,
	.name		= "clk",
	.post_probe	= clk_uclass_post_probe,
};