1
2
3
4
5
6
7
8#include <linux/kernel.h>
9#include <linux/device.h>
10#include <linux/io.h>
11#include <linux/pm.h>
12#include <linux/pm_clock.h>
13#include <linux/clk.h>
14#include <linux/clkdev.h>
15#include <linux/of_clk.h>
16#include <linux/slab.h>
17#include <linux/err.h>
18#include <linux/pm_domain.h>
19#include <linux/pm_runtime.h>
20
21#ifdef CONFIG_PM_CLK
22
/*
 * Per-clock state for entries on a device's PM clock list.  Entries move
 * NONE -> ACQUIRED or PREPARED in pm_clk_acquire(), to ENABLED in
 * __pm_clk_enable(), or to ERROR when clk_get()/clk_prepare() fails.
 */
enum pce_status {
	PCE_STATUS_NONE = 0,	/* entry allocated, clock not acquired yet */
	PCE_STATUS_ACQUIRED,	/* clk_get() done; prepare deferred because it
				 * would also enable the clock */
	PCE_STATUS_PREPARED,	/* clk_prepare() done, clock not enabled */
	PCE_STATUS_ENABLED,	/* clock prepared and enabled */
	PCE_STATUS_ERROR,	/* acquire/prepare failed; clk may be ERR_PTR */
};
30
/*
 * One clock managed for a device, linked on pm_subsys_data->clock_list.
 */
struct pm_clock_entry {
	struct list_head node;		/* link in psd->clock_list */
	char *con_id;			/* kstrdup()'d connection ID, or NULL */
	struct clk *clk;		/* clock handle; ERR_PTR() on failure */
	enum pce_status status;		/* see enum pce_status above */
	bool enabled_when_prepared;	/* clk_prepare() also enables this
					 * clock, so enable/disable may sleep */
};
38
39
40
41
42
43
44
45
46
47
48
49
50
51
/**
 * pm_clk_list_lock - ensure exclusive access for modifying the PM clock
 *		      entry list.
 * @psd: pm_subsys_data instance corresponding to the PM clock entry list
 *	 and clock_op_might_sleep count to be modified.
 *
 * Get exclusive access before modifying the PM clock entry list and the
 * clock_op_might_sleep count to guard against concurrent modifications.
 * This also protects against a concurrent clock_op_might_sleep and PM clock
 * entry list usage in pm_clk_suspend()/pm_clk_resume() that may or may not
 * happen in atomic context, hence both the mutex and the spinlock must be
 * taken here.
 */
static void pm_clk_list_lock(struct pm_subsys_data *psd)
	__acquires(&psd->lock)
{
	mutex_lock(&psd->clock_mutex);
	spin_lock_irq(&psd->lock);
}
58
59
60
61
62
63
/**
 * pm_clk_list_unlock - counterpart to pm_clk_list_lock().
 * @psd: the same pm_subsys_data instance previously passed to
 *	 pm_clk_list_lock().
 */
static void pm_clk_list_unlock(struct pm_subsys_data *psd)
	__releases(&psd->lock)
{
	/* release in the reverse order of acquisition */
	spin_unlock_irq(&psd->lock);
	mutex_unlock(&psd->clock_mutex);
}
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86static int pm_clk_op_lock(struct pm_subsys_data *psd, unsigned long *flags,
87 const char *fn)
88
89{
90 bool atomic_context = in_atomic() || irqs_disabled();
91
92try_again:
93 spin_lock_irqsave(&psd->lock, *flags);
94 if (!psd->clock_op_might_sleep) {
95
96 __release(&psd->lock);
97 return 0;
98 }
99
100
101 if (atomic_context) {
102 pr_err("%s: atomic context with clock_ops_might_sleep = %d",
103 fn, psd->clock_op_might_sleep);
104 spin_unlock_irqrestore(&psd->lock, *flags);
105 might_sleep();
106 return -EPERM;
107 }
108
109
110 spin_unlock_irqrestore(&psd->lock, *flags);
111 mutex_lock(&psd->clock_mutex);
112
113
114
115
116
117 if (likely(psd->clock_op_might_sleep))
118 return 0;
119
120 mutex_unlock(&psd->clock_mutex);
121 goto try_again;
122}
123
124
125
126
127
128
129
/**
 * pm_clk_op_unlock - counterpart to pm_clk_op_lock().
 * @psd: the same pm_subsys_data instance previously passed to
 *	 pm_clk_op_lock().
 * @flags: irq flags provided by pm_clk_op_lock().
 */
static void pm_clk_op_unlock(struct pm_subsys_data *psd, unsigned long *flags)
	/* sparse annotations don't work here as entry state isn't static */
{
	/* pm_clk_op_lock() took the mutex iff clock ops might sleep */
	if (psd->clock_op_might_sleep) {
		mutex_unlock(&psd->clock_mutex);
	} else {
		/* the __acquire is there to work around sparse limitations */
		__acquire(&psd->lock);
		spin_unlock_irqrestore(&psd->lock, *flags);
	}
}
141
142
143
144
145
146
147static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
148{
149 int ret;
150
151 switch (ce->status) {
152 case PCE_STATUS_ACQUIRED:
153 ret = clk_prepare_enable(ce->clk);
154 break;
155 case PCE_STATUS_PREPARED:
156 ret = clk_enable(ce->clk);
157 break;
158 default:
159 return;
160 }
161 if (!ret)
162 ce->status = PCE_STATUS_ENABLED;
163 else
164 dev_err(dev, "%s: failed to enable clk %p, error %d\n",
165 __func__, ce->clk, ret);
166}
167
168
169
170
171
172
173static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
174{
175 if (!ce->clk)
176 ce->clk = clk_get(dev, ce->con_id);
177 if (IS_ERR(ce->clk)) {
178 ce->status = PCE_STATUS_ERROR;
179 return;
180 } else if (clk_is_enabled_when_prepared(ce->clk)) {
181
182 ce->status = PCE_STATUS_ACQUIRED;
183 ce->enabled_when_prepared = true;
184 } else if (clk_prepare(ce->clk)) {
185 ce->status = PCE_STATUS_ERROR;
186 dev_err(dev, "clk_prepare() failed\n");
187 return;
188 } else {
189 ce->status = PCE_STATUS_PREPARED;
190 }
191 dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
192 ce->clk, ce->con_id);
193}
194
195static int __pm_clk_add(struct device *dev, const char *con_id,
196 struct clk *clk)
197{
198 struct pm_subsys_data *psd = dev_to_psd(dev);
199 struct pm_clock_entry *ce;
200
201 if (!psd)
202 return -EINVAL;
203
204 ce = kzalloc(sizeof(*ce), GFP_KERNEL);
205 if (!ce)
206 return -ENOMEM;
207
208 if (con_id) {
209 ce->con_id = kstrdup(con_id, GFP_KERNEL);
210 if (!ce->con_id) {
211 kfree(ce);
212 return -ENOMEM;
213 }
214 } else {
215 if (IS_ERR(clk)) {
216 kfree(ce);
217 return -ENOENT;
218 }
219 ce->clk = clk;
220 }
221
222 pm_clk_acquire(dev, ce);
223
224 pm_clk_list_lock(psd);
225 list_add_tail(&ce->node, &psd->clock_list);
226 if (ce->enabled_when_prepared)
227 psd->clock_op_might_sleep++;
228 pm_clk_list_unlock(psd);
229 return 0;
230}
231
232
233
234
235
236
237
238
239
/**
 * pm_clk_add - Start using a device clock for power management.
 * @dev: Device whose clock is going to be used for power management.
 * @con_id: Connection ID of the clock.
 *
 * Add the clock to the list of clocks used for the power management of @dev.
 * Returns 0 on success, or a negative errno from __pm_clk_add().
 */
int pm_clk_add(struct device *dev, const char *con_id)
{
	return __pm_clk_add(dev, con_id, NULL);
}
EXPORT_SYMBOL_GPL(pm_clk_add);
245
246
247
248
249
250
251
252
253
254
255
/**
 * pm_clk_add_clk - Start using a device clock for power management.
 * @dev: Device whose clock is going to be used for power management.
 * @clk: Clock pointer.
 *
 * Add the clock to the list of clocks used for the power management of @dev.
 * On success the PM clock layer takes over the reference: clk_put() is
 * called when the entry is eventually removed (see __pm_clk_remove()).
 */
int pm_clk_add_clk(struct device *dev, struct clk *clk)
{
	return __pm_clk_add(dev, NULL, clk);
}
EXPORT_SYMBOL_GPL(pm_clk_add_clk);
261
262
263
264
265
266
267
268
269
270
271
272
273int of_pm_clk_add_clk(struct device *dev, const char *name)
274{
275 struct clk *clk;
276 int ret;
277
278 if (!dev || !dev->of_node || !name)
279 return -EINVAL;
280
281 clk = of_clk_get_by_name(dev->of_node, name);
282 if (IS_ERR(clk))
283 return PTR_ERR(clk);
284
285 ret = pm_clk_add_clk(dev, clk);
286 if (ret) {
287 clk_put(clk);
288 return ret;
289 }
290
291 return 0;
292}
293EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
294
295
296
297
298
299
300
301
302
303
304
305int of_pm_clk_add_clks(struct device *dev)
306{
307 struct clk **clks;
308 int i, count;
309 int ret;
310
311 if (!dev || !dev->of_node)
312 return -EINVAL;
313
314 count = of_clk_get_parent_count(dev->of_node);
315 if (count <= 0)
316 return -ENODEV;
317
318 clks = kcalloc(count, sizeof(*clks), GFP_KERNEL);
319 if (!clks)
320 return -ENOMEM;
321
322 for (i = 0; i < count; i++) {
323 clks[i] = of_clk_get(dev->of_node, i);
324 if (IS_ERR(clks[i])) {
325 ret = PTR_ERR(clks[i]);
326 goto error;
327 }
328
329 ret = pm_clk_add_clk(dev, clks[i]);
330 if (ret) {
331 clk_put(clks[i]);
332 goto error;
333 }
334 }
335
336 kfree(clks);
337
338 return i;
339
340error:
341 while (i--)
342 pm_clk_remove_clk(dev, clks[i]);
343
344 kfree(clks);
345
346 return ret;
347}
348EXPORT_SYMBOL_GPL(of_pm_clk_add_clks);
349
350
351
352
353
354static void __pm_clk_remove(struct pm_clock_entry *ce)
355{
356 if (!ce)
357 return;
358
359 switch (ce->status) {
360 case PCE_STATUS_ENABLED:
361 clk_disable(ce->clk);
362 fallthrough;
363 case PCE_STATUS_PREPARED:
364 clk_unprepare(ce->clk);
365 fallthrough;
366 case PCE_STATUS_ACQUIRED:
367 case PCE_STATUS_ERROR:
368 if (!IS_ERR(ce->clk))
369 clk_put(ce->clk);
370 break;
371 default:
372 break;
373 }
374
375 kfree(ce->con_id);
376 kfree(ce);
377}
378
379
380
381
382
383
384
385
386
/**
 * pm_clk_remove - Stop using a device clock for power management.
 * @dev: Device whose clock should not be used for PM any more.
 * @con_id: Connection ID of the clock.
 *
 * Remove the clock represented by @con_id from the list of clocks used for
 * the power management of @dev.  A NULL @con_id matches only the entry that
 * was itself added with a NULL connection ID.
 */
void pm_clk_remove(struct device *dev, const char *con_id)
{
	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce;

	if (!psd)
		return;

	pm_clk_list_lock(psd);

	list_for_each_entry(ce, &psd->clock_list, node) {
		if (!con_id && !ce->con_id)
			goto remove;
		else if (!con_id || !ce->con_id)
			continue;
		else if (!strcmp(con_id, ce->con_id))
			goto remove;
	}

	pm_clk_list_unlock(psd);
	return;

 remove:
	list_del(&ce->node);
	if (ce->enabled_when_prepared)
		psd->clock_op_might_sleep--;
	pm_clk_list_unlock(psd);

	/* release the clock after dropping the locks */
	__pm_clk_remove(ce);
}
EXPORT_SYMBOL_GPL(pm_clk_remove);
418
419
420
421
422
423
424
425
426
427void pm_clk_remove_clk(struct device *dev, struct clk *clk)
428{
429 struct pm_subsys_data *psd = dev_to_psd(dev);
430 struct pm_clock_entry *ce;
431
432 if (!psd || !clk)
433 return;
434
435 pm_clk_list_lock(psd);
436
437 list_for_each_entry(ce, &psd->clock_list, node) {
438 if (clk == ce->clk)
439 goto remove;
440 }
441
442 pm_clk_list_unlock(psd);
443 return;
444
445 remove:
446 list_del(&ce->node);
447 if (ce->enabled_when_prepared)
448 psd->clock_op_might_sleep--;
449 pm_clk_list_unlock(psd);
450
451 __pm_clk_remove(ce);
452}
453EXPORT_SYMBOL_GPL(pm_clk_remove_clk);
454
455
456
457
458
459
460
461
462void pm_clk_init(struct device *dev)
463{
464 struct pm_subsys_data *psd = dev_to_psd(dev);
465 if (psd) {
466 INIT_LIST_HEAD(&psd->clock_list);
467 mutex_init(&psd->clock_mutex);
468 psd->clock_op_might_sleep = 0;
469 }
470}
471EXPORT_SYMBOL_GPL(pm_clk_init);
472
473
474
475
476
477
478
479
/**
 * pm_clk_create - Create a device's list of power management clocks.
 * @dev: Device to create the list of PM clocks for.
 *
 * Delegates to dev_pm_get_subsys_data() to allocate and attach the
 * per-device PM subsystem data that holds the clock list.  Returns its
 * result (0 on success, negative errno otherwise).
 */
int pm_clk_create(struct device *dev)
{
	return dev_pm_get_subsys_data(dev);
}
EXPORT_SYMBOL_GPL(pm_clk_create);
485
486
487
488
489
490
491
492
493
/**
 * pm_clk_destroy - Destroy a device's list of power management clocks.
 * @dev: Device to destroy the list for.
 *
 * Move all of the device's PM clock entries to a private list while holding
 * the list lock, drop the reference to the device's pm_subsys_data, then
 * release every clock with no locks held.
 */
void pm_clk_destroy(struct device *dev)
{
	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce, *c;
	struct list_head list;

	if (!psd)
		return;

	INIT_LIST_HEAD(&list);

	pm_clk_list_lock(psd);

	/* detach all entries under the lock ... */
	list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
		list_move(&ce->node, &list);
	psd->clock_op_might_sleep = 0;

	pm_clk_list_unlock(psd);

	dev_pm_put_subsys_data(dev);

	/* ... and release the clocks without holding any locks */
	list_for_each_entry_safe_reverse(ce, c, &list, node) {
		list_del(&ce->node);
		__pm_clk_remove(ce);
	}
}
EXPORT_SYMBOL_GPL(pm_clk_destroy);
521
/* devres action callback: tear down the PM clock list of the device @data */
static void pm_clk_destroy_action(void *data)
{
	pm_clk_destroy(data);
}
526
527int devm_pm_clk_create(struct device *dev)
528{
529 int ret;
530
531 ret = pm_clk_create(dev);
532 if (ret)
533 return ret;
534
535 return devm_add_action_or_reset(dev, pm_clk_destroy_action, dev);
536}
537EXPORT_SYMBOL_GPL(devm_pm_clk_create);
538
539
540
541
542
/**
 * pm_clk_suspend - Disable clocks in a device's PM clock list.
 * @dev: Device to disable the clocks for.
 *
 * Returns 0 on success (including when @dev has no PM clock data), or the
 * error from pm_clk_op_lock() when called in atomic context while some
 * clock operations might sleep.
 */
int pm_clk_suspend(struct device *dev)
{
	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce;
	unsigned long flags;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	/* nothing to do without PM clock data */
	if (!psd)
		return 0;

	ret = pm_clk_op_lock(psd, &flags, __func__);
	if (ret)
		return ret;

	/* disable in reverse of the order the clocks were enabled in */
	list_for_each_entry_reverse(ce, &psd->clock_list, node) {
		if (ce->status == PCE_STATUS_ENABLED) {
			if (ce->enabled_when_prepared) {
				/* disabling requires a full unprepare here */
				clk_disable_unprepare(ce->clk);
				ce->status = PCE_STATUS_ACQUIRED;
			} else {
				clk_disable(ce->clk);
				ce->status = PCE_STATUS_PREPARED;
			}
		}
	}

	pm_clk_op_unlock(psd, &flags);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_clk_suspend);
576
577
578
579
580
/**
 * pm_clk_resume - Enable clocks in a device's PM clock list.
 * @dev: Device to enable the clocks for.
 *
 * Returns 0 on success (including when @dev has no PM clock data), or the
 * error from pm_clk_op_lock() when called in atomic context while some
 * clock operations might sleep.
 */
int pm_clk_resume(struct device *dev)
{
	struct pm_subsys_data *psd = dev_to_psd(dev);
	struct pm_clock_entry *ce;
	unsigned long flags;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	/* nothing to do without PM clock data */
	if (!psd)
		return 0;

	ret = pm_clk_op_lock(psd, &flags, __func__);
	if (ret)
		return ret;

	list_for_each_entry(ce, &psd->clock_list, node)
		__pm_clk_enable(dev, ce);

	pm_clk_op_unlock(psd, &flags);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_clk_resume);
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
/**
 * pm_clk_notify - Notify routine for device addition and removal.
 * @nb: Notifier block object this function is a member of.
 * @action: Operation being carried out by the caller.
 * @data: Device the routine is being run for.
 *
 * For this function to work, @nb must be a member of an object of type
 * struct pm_clk_notifier_block containing all of the requisite data.
 * Specifically, the pm_domain member of that object is installed as the
 * device's PM domain and its con_ids member is used to populate the
 * device's list of PM clocks, depending on @action.
 */
static int pm_clk_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct pm_clk_notifier_block *clknb;
	struct device *dev = data;
	char **con_id;
	int error;

	dev_dbg(dev, "%s() %ld\n", __func__, action);

	clknb = container_of(nb, struct pm_clk_notifier_block, nb);

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		/* don't touch devices that already have a PM domain */
		if (dev->pm_domain)
			break;

		error = pm_clk_create(dev);
		if (error)
			break;

		dev_pm_domain_set(dev, clknb->pm_domain);
		if (clknb->con_ids[0]) {
			for (con_id = clknb->con_ids; *con_id; con_id++)
				pm_clk_add(dev, *con_id);
		} else {
			/* no IDs given: manage the device's default clock */
			pm_clk_add(dev, NULL);
		}

		break;
	case BUS_NOTIFY_DEL_DEVICE:
		if (dev->pm_domain != clknb->pm_domain)
			break;

		dev_pm_domain_set(dev, NULL);
		pm_clk_destroy(dev);
		break;
	}

	return 0;
}
663
/**
 * pm_clk_runtime_suspend - Runtime-suspend a device and then its clocks.
 * @dev: Device to suspend.
 *
 * Runs the generic runtime-suspend callback first and gates the device's
 * PM clocks afterwards; the device is resumed again if gating the clocks
 * fails.  Returns 0 on success or a negative errno.
 */
int pm_clk_runtime_suspend(struct device *dev)
{
	int err;

	dev_dbg(dev, "%s\n", __func__);

	err = pm_generic_runtime_suspend(dev);
	if (err) {
		dev_err(dev, "failed to suspend device\n");
		return err;
	}

	err = pm_clk_suspend(dev);
	if (!err)
		return 0;

	/* undo the device suspend so the device is left usable */
	dev_err(dev, "failed to suspend clock\n");
	pm_generic_runtime_resume(dev);
	return err;
}
EXPORT_SYMBOL_GPL(pm_clk_runtime_suspend);
686
/**
 * pm_clk_runtime_resume - Ungate a device's clocks and runtime-resume it.
 * @dev: Device to resume.
 *
 * Enables the device's PM clocks and, if that succeeds, runs the generic
 * runtime-resume callback.  Returns 0 on success or a negative errno.
 */
int pm_clk_runtime_resume(struct device *dev)
{
	int err;

	dev_dbg(dev, "%s\n", __func__);

	err = pm_clk_resume(dev);
	if (!err)
		return pm_generic_runtime_resume(dev);

	dev_err(dev, "failed to resume clock\n");
	return err;
}
EXPORT_SYMBOL_GPL(pm_clk_runtime_resume);
702
703#else
704
705
706
707
708
709
/**
 * enable_clock - Enable a device clock.
 * @dev: Device whose clock is to be enabled.
 * @con_id: Connection ID of the clock.
 *
 * Best effort: silently does nothing when the clock cannot be looked up.
 */
static void enable_clock(struct device *dev, const char *con_id)
{
	struct clk *clk = clk_get(dev, con_id);

	if (IS_ERR(clk))
		return;

	clk_prepare_enable(clk);
	clk_put(clk);
	dev_info(dev, "Runtime PM disabled, clock forced on.\n");
}
721
722
723
724
725
726
/**
 * disable_clock - Disable a device clock.
 * @dev: Device whose clock is to be disabled.
 * @con_id: Connection ID of the clock.
 *
 * Best effort: silently does nothing when the clock cannot be looked up.
 */
static void disable_clock(struct device *dev, const char *con_id)
{
	struct clk *clk = clk_get(dev, con_id);

	if (IS_ERR(clk))
		return;

	clk_disable_unprepare(clk);
	clk_put(clk);
	dev_info(dev, "Runtime PM disabled, clock forced off.\n");
}
738
739
740
741
742
743
744
745
746
747
748
749
750static int pm_clk_notify(struct notifier_block *nb,
751 unsigned long action, void *data)
752{
753 struct pm_clk_notifier_block *clknb;
754 struct device *dev = data;
755 char **con_id;
756
757 dev_dbg(dev, "%s() %ld\n", __func__, action);
758
759 clknb = container_of(nb, struct pm_clk_notifier_block, nb);
760
761 switch (action) {
762 case BUS_NOTIFY_BIND_DRIVER:
763 if (clknb->con_ids[0]) {
764 for (con_id = clknb->con_ids; *con_id; con_id++)
765 enable_clock(dev, *con_id);
766 } else {
767 enable_clock(dev, NULL);
768 }
769 break;
770 case BUS_NOTIFY_DRIVER_NOT_BOUND:
771 case BUS_NOTIFY_UNBOUND_DRIVER:
772 if (clknb->con_ids[0]) {
773 for (con_id = clknb->con_ids; *con_id; con_id++)
774 disable_clock(dev, *con_id);
775 } else {
776 disable_clock(dev, NULL);
777 }
778 break;
779 }
780
781 return 0;
782}
783
784#endif
785
786
787
788
789
790
791
792
793
794
795
796void pm_clk_add_notifier(struct bus_type *bus,
797 struct pm_clk_notifier_block *clknb)
798{
799 if (!bus || !clknb)
800 return;
801
802 clknb->nb.notifier_call = pm_clk_notify;
803 bus_register_notifier(bus, &clknb->nb);
804}
805EXPORT_SYMBOL_GPL(pm_clk_add_notifier);
806