/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#include "power.h"

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})
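
/*
 * Illustrative expansion (not part of the original file): with the "stop"
 * callback, GENPD_DEV_CALLBACK(genpd, int, stop, dev) evaluates roughly as:
 *
 *	int (*__routine)(struct device *__d) = genpd->dev_ops.stop;
 *	int __ret = 0;
 *
 *	if (__routine)
 *		__ret = __routine(dev);
 *	__ret;	// value of the ({ ... }) statement expression
 *
 * i.e. the macro invokes an optional per-device callback of the requested
 * type and yields (type)0 when no callback is installed.
 */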

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *genpd_lookup_dev(struct device *dev)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (&gpd->domain == dev->pm_domain) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_on)
		return 0;

	if (!timed)
		return genpd->power_on(genpd);

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		return ret;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

	return ret;
}

static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_off)
		return 0;

	if (!timed)
		return genpd->power_off(genpd);

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret == -EBUSY)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		return ret;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

	return ret;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_poweroff() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd->status == GPD_STATE_ACTIVE)
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		struct generic_pm_domain *master = link->master;

		genpd_sd_counter_inc(master);

		mutex_lock_nested(&master->lock, depth + 1);
		ret = genpd_poweron(master, depth + 1);
		mutex_unlock(&master->lock);

		if (ret) {
			genpd_sd_counter_dec(master);
			goto err;
		}
	}

	ret = genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GPD_STATE_ACTIVE;
	return 0;

 err:
	list_for_each_entry_continue_reverse(link,
					&genpd->slave_links,
					slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

	return ret;
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd && pdd->dev) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			mutex_lock(&genpd->lock);
			genpd->max_off_time_changed = true;
			mutex_unlock(&genpd->lock);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @is_async: PM domain is powered down from a scheduled work
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->prepared_count > 0)
		return 0;

	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev,
					PM_QOS_FLAG_NO_POWER_OFF
						| PM_QOS_FLAG_REMOTE_WAKEUP);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		if (!pm_runtime_suspended(pdd->dev) || pdd->dev->power.irq_safe)
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && is_async))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	if (genpd->power_off) {
		int ret;

		if (atomic_read(&genpd->sd_count) > 0)
			return -EBUSY;

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call genpd_poweron() for the master yet after
		 * incrementing it.  In that case genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd_power_off(genpd, true);
		if (ret)
			return ret;
	}

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

	return 0;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	mutex_lock(&genpd->lock);
	genpd_poweroff(genpd, true);
	mutex_unlock(&genpd->lock);
}

/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	if (runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine will be run with interrupts
	 * off, so it can't use mutexes.
	 */
	if (dev->power.irq_safe)
		return 0;

	mutex_lock(&genpd->lock);
	genpd_poweroff(genpd, false);
	mutex_unlock(&genpd->lock);

	return 0;
}

/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;
	bool timed = true;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* If power.irq_safe, the PM domain is never powered off. */
	if (dev->power.irq_safe) {
		timed = false;
		goto out;
	}

	mutex_lock(&genpd->lock);
	ret = genpd_poweron(genpd, 0);
	mutex_unlock(&genpd->lock);

	if (ret)
		return ret;

 out:
	/* Measure resume latency. */
	if (timed && runtime_pm)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

err_stop:
	genpd_stop_dev(genpd, dev);
err_poweroff:
	if (!dev->power.irq_safe) {
		mutex_lock(&genpd->lock);
		genpd_poweroff(genpd, false);
		mutex_unlock(&genpd->lock);
	}

	return ret;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

/**
 * genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_poweroff_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall(genpd_poweroff_unused);

#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)

/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 */
static bool pm_genpd_present(const struct generic_pm_domain *genpd)
{
	const struct generic_pm_domain *gpd;

	if (IS_ERR_OR_NULL(genpd))
		return false;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;

	return false;
}

#endif

#ifdef CONFIG_PM_SLEEP

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

/**
 * genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	genpd_power_off(genpd, false);

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_sync_poweroff(link->master);
	}
}

/**
 * genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void genpd_sync_poweron(struct generic_pm_domain *genpd)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_ACTIVE)
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sync_poweron(link->master);
		genpd_sd_counter_inc(link->master);
	}

	genpd_power_on(genpd, false);

	genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed before being put into a sleep state: (1) if the
 * device is enabled to wake up the system from sleep states and it has to
 * remain active for this purpose while the system is in the sleep state and
 * (2) if the device is not enabled to wake up the system from sleep states and
 * it generally doesn't generate wakeup signals by itself (those signals are
 * generated on its behalf by other parts of the system and it has to remain
 * active for this purpose while the system is in the sleep state).  If either
 * of these cases holds, the device is resumed before the system is put into a
 * sleep state.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count++ == 0)
		genpd->suspended_count = 0;

	mutex_unlock(&genpd->lock);

	ret = pm_generic_prepare(dev);
	if (ret) {
		mutex_lock(&genpd->lock);

		genpd->prepared_count--;

		mutex_unlock(&genpd->lock);
	}

	return ret;
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
		return 0;

	if (genpd->dev_ops.stop && genpd->dev_ops.start) {
		ret = pm_runtime_force_suspend(dev);
		if (ret)
			return ret;
	}

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	genpd_sync_poweroff(genpd);

	return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd_sync_poweron(genpd);
	genpd->suspended_count--;

	if (genpd->dev_ops.stop && genpd->dev_ops.start)
		ret = pm_runtime_force_resume(dev);

	return ret;
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->dev_ops.stop && genpd->dev_ops.start)
		ret = pm_runtime_force_suspend(dev);

	return ret;
}

/**
 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, allowing it to be put into the low power state
 * subsequently.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->dev_ops.stop && genpd->dev_ops.start)
		ret = pm_runtime_force_resume(dev);

	return ret;
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 *
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	if (genpd->suspended_count++ == 0)
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to genpd_sync_poweron(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;

	genpd_sync_poweron(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start)
		ret = pm_runtime_force_resume(dev);

	return ret;
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	pm_generic_complete(dev);

	mutex_lock(&genpd->lock);

	genpd->prepared_count--;
	if (!genpd->prepared_count)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&genpd->lock);
}

/**
 * genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
static void genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd(dev);
	if (!pm_genpd_present(genpd))
		return;

	if (suspend) {
		genpd->suspended_count++;
		genpd_sync_poweroff(genpd);
	} else {
		genpd_sync_poweron(genpd);
		genpd->suspended_count--;
	}
}

void pm_genpd_syscore_poweroff(struct device *dev)
{
	genpd_syscore_switch(dev, true);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);

void pm_genpd_syscore_poweron(struct device *dev)
{
	genpd_syscore_switch(dev, false);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
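
/*
 * Illustrative sketch (assumption, not from this file): a timekeeping device
 * such as a clocksource that must stay functional across the "noirq" phases
 * would bracket its syscore suspend/resume hooks like this:
 *
 *	static void foo_clocksource_suspend(struct clocksource *cs)
 *	{
 *		foo_stop_counter(cs);
 *		pm_genpd_syscore_poweroff(foo_cs_to_dev(cs));
 *	}
 *
 *	static void foo_clocksource_resume(struct clocksource *cs)
 *	{
 *		pm_genpd_syscore_poweron(foo_cs_to_dev(cs));
 *		foo_restart_counter(cs);
 *	}
 *
 * The foo_* names are hypothetical; the point is that power is removed after
 * the device stops and restored before the device is touched again.
 */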

#else

#define pm_genpd_prepare		NULL
#define pm_genpd_suspend_noirq		NULL
#define pm_genpd_resume_noirq		NULL
#define pm_genpd_freeze_noirq		NULL
#define pm_genpd_thaw_noirq		NULL
#define pm_genpd_restore_noirq		NULL
#define pm_genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
					struct generic_pm_domain *genpd,
					struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	if (td)
		gpd_data->td = *td;

	gpd_data->base.dev = dev;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = -1;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		ret = -EINVAL;
		goto err_free;
	}

	dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	dev_pm_domain_set(dev, &genpd->domain);

	return gpd_data;

 err_free:
	spin_unlock_irq(&dev->power.lock);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	dev_pm_domain_set(dev, NULL);

	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev, genpd, td);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

 out:
	mutex_unlock(&genpd->lock);

	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			  struct gpd_timing_data *td)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_device(genpd, dev, td);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(__pm_genpd_add_device);
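
/*
 * Illustrative sketch (assumption, not from this file): a platform may bind a
 * device to an already initialized domain without device tree support, e.g.
 * from board code:
 *
 *	ret = __pm_genpd_add_device(&foo_domain, &foo_device.dev, NULL);
 *	if (ret)
 *		pr_err("failed to add foo to its PM domain: %d\n", ret);
 *
 * foo_domain and foo_device are hypothetical. Passing NULL timing data (@td)
 * is fine; per-device latencies then start at zero and are learned as the
 * device is suspended and resumed.
 */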

static int genpd_remove_device(struct generic_pm_domain *genpd,
			       struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	list_del_init(&pdd->list_node);

	mutex_unlock(&genpd->lock);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	mutex_unlock(&genpd->lock);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	if (!genpd || genpd != genpd_lookup_dev(dev))
		return -EINVAL;

	return genpd_remove_device(genpd, dev);
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);

static int genpd_add_subdomain(struct generic_pm_domain *genpd,
			       struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	mutex_lock(&subdomain->lock);
	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);

	if (genpd->status == GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(itr, &genpd->master_links, master_node) {
		if (itr->slave == subdomain && itr->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (subdomain->status != GPD_STATE_POWER_OFF)
		genpd_sd_counter_inc(genpd);

 out:
	mutex_unlock(&genpd->lock);
	mutex_unlock(&subdomain->lock);
	if (ret)
		kfree(link);
	return ret;
}

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_subdomain(genpd, subdomain);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
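
/*
 * Illustrative sketch (assumption, not from this file): building a two-level
 * hierarchy in which a "core" master domain contains a "gpu" subdomain, so
 * that powering off everything in "gpu" makes "core" eligible for power-off
 * as well:
 *
 *	static struct generic_pm_domain core_pd = { .name = "core" };
 *	static struct generic_pm_domain gpu_pd = { .name = "gpu" };
 *
 *	pm_genpd_init(&core_pd, NULL, true);
 *	pm_genpd_init(&gpu_pd, NULL, true);
 *	ret = pm_genpd_add_subdomain(&core_pd, &gpu_pd);
 *
 * The domain names are hypothetical. After this, the sd_count of "core"
 * tracks how many of its subdomains are powered on, and genpd_poweroff()
 * refuses to turn "core" off while that count is non-zero.
 */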

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	mutex_lock(&subdomain->lock);
	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);

	if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
			subdomain->name);
		ret = -EBUSY;
		goto out;
	}

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		ret = 0;
		break;
	}

out:
	mutex_unlock(&genpd->lock);
	mutex_unlock(&subdomain->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's power_is_off field.
 *
 * Returns 0 on successful initialization, other error code otherwise.
 */
int pm_genpd_init(struct generic_pm_domain *genpd,
		  struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->provider = NULL;
	genpd->has_provider = false;
	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.complete = pm_genpd_complete;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	if (genpd->state_idx >= GENPD_MAX_NUM_STATES) {
		pr_warn("Initial state index out of bounds.\n");
		genpd->state_idx = GENPD_MAX_NUM_STATES - 1;
	}

	if (genpd->state_count > GENPD_MAX_NUM_STATES) {
		pr_warn("Limiting states to %d\n", GENPD_MAX_NUM_STATES);
		genpd->state_count = GENPD_MAX_NUM_STATES;
	}

	/* Use only one "off" state if there were no states declared */
	if (genpd->state_count == 0)
		genpd->state_count = 1;

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_genpd_init);
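
/*
 * Illustrative sketch (assumption, not from this file): a minimal SoC power
 * domain driver embeds a struct generic_pm_domain, supplies ->power_on() and
 * ->power_off() callbacks that poke the power controller, and registers the
 * domain:
 *
 *	static int foo_pd_power_on(struct generic_pm_domain *domain)
 *	{
 *		struct foo_pm_domain *pd = container_of(domain,
 *						struct foo_pm_domain, genpd);
 *		return foo_pmu_enable(pd->base);
 *	}
 *
 *	static int foo_pd_power_off(struct generic_pm_domain *domain)
 *	{
 *		struct foo_pm_domain *pd = container_of(domain,
 *						struct foo_pm_domain, genpd);
 *		return foo_pmu_disable(pd->base);
 *	}
 *
 *	pd->genpd.name = "foo";
 *	pd->genpd.power_on = foo_pd_power_on;
 *	pd->genpd.power_off = foo_pd_power_off;
 *	pd->genpd.flags = GENPD_FLAG_PM_CLK;	// gate device clocks on stop/start
 *	pm_genpd_init(&pd->genpd, NULL, true);	// register, initially off
 *
 * struct foo_pm_domain and the foo_pmu_* helpers are hypothetical; only the
 * genpd fields and the pm_genpd_init() call reflect the API in this file.
 */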

static int genpd_remove(struct generic_pm_domain *genpd)
{
	struct gpd_link *l, *link;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	mutex_lock(&genpd->lock);

	if (genpd->has_provider) {
		mutex_unlock(&genpd->lock);
		pr_err("Provider present, unable to remove %s\n", genpd->name);
		return -EBUSY;
	}

	if (!list_empty(&genpd->master_links) || genpd->device_count) {
		mutex_unlock(&genpd->lock);
		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
		return -EBUSY;
	}

	list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
	}

	list_del(&genpd->gpd_list_node);
	mutex_unlock(&genpd->lock);
	cancel_work_sync(&genpd->power_off_work);
	pr_debug("%s: removed %s\n", __func__, genpd->name);

	return 0;
}

/**
 * pm_genpd_remove - Remove a generic I/O PM domain
 * @genpd: Pointer to PM domain that is to be removed.
 *
 * To remove the PM domain, this function:
 *  - Removes the PM domain as a subdomain to any parent domains,
 *    if it was added.
 *  - Removes the PM domain from the list of registered PM domains.
 *
 * The PM domain will only be removed, if the associated provider has
 * been removed, it is not a parent to any other PM domain and has no
 * devices associated with it.
 */
int pm_genpd_remove(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_remove(genpd);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove);

#ifdef CONFIG_PM_GENERIC_DOMAINS_OF

typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
						   void *data);

/*
 * Device Tree based PM domain providers.
 *
 * The code below implements generic device tree based PM domain providers that
 * bind device tree nodes with generic PM domains registered in the system.
 *
 * Any driver that registers generic PM domains and needs to support binding of
 * devices to these domains is supposed to register a PM domain provider, which
 * maps a PM domain specifier retrieved from the device tree to a PM domain.
 *
 * Two simple mapping functions have been provided for convenience:
 *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
 *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
 *    index.
 */

/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *         into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};

/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);

/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);

/**
 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of xlate function needs
 * to be a valid pointer to struct generic_pm_domain.
 */
static struct generic_pm_domain *genpd_xlate_simple(
					struct of_phandle_args *genpdspec,
					void *data)
{
	if (genpdspec->args_count != 0)
		return ERR_PTR(-EINVAL);
	return data;
}

/**
 * genpd_xlate_onecell() - Xlate function using a single index.
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct genpd_onecell_data
 *
 * This is a generic xlate function that can be used to model simple PM domain
 * controllers that have one device tree node and provide multiple PM domains.
 * A single cell is used as an index into an array of PM domains specified in
 * the genpd_onecell_data struct when registering the provider.
 */
static struct generic_pm_domain *genpd_xlate_onecell(
					struct of_phandle_args *genpdspec,
					void *data)
{
	struct genpd_onecell_data *genpd_data = data;
	unsigned int idx = genpdspec->args[0];

	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	if (idx >= genpd_data->num_domains) {
		pr_err("%s: invalid domain index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	if (!genpd_data->domains[idx])
		return ERR_PTR(-ENOENT);

	return genpd_data->domains[idx];
}

/**
 * genpd_add_provider() - Register a PM domain provider for a node
 * @np: Device node pointer associated with the PM domain provider.
 * @xlate: Callback for decoding PM domain from phandle arguments.
 * @data: Context pointer for @xlate callback.
 */
static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
			      void *data)
{
	struct of_genpd_provider *cp;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->xlate = xlate;

	mutex_lock(&of_genpd_mutex);
	list_add(&cp->link, &of_genpd_providers);
	mutex_unlock(&of_genpd_mutex);
	pr_debug("Added domain provider from %s\n", np->full_name);

	return 0;
}

/**
 * of_genpd_add_provider_simple() - Register a simple PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @genpd: Pointer to PM domain associated with the PM domain provider.
 */
int of_genpd_add_provider_simple(struct device_node *np,
				 struct generic_pm_domain *genpd)
{
	int ret = -EINVAL;

	if (!np || !genpd)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	if (pm_genpd_present(genpd))
		ret = genpd_add_provider(np, genpd_xlate_simple, genpd);

	if (!ret) {
		genpd->provider = &np->fwnode;
		genpd->has_provider = true;
	}

	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);

/**
 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @data: Pointer to the data associated with the PM domain provider.
 */
int of_genpd_add_provider_onecell(struct device_node *np,
				  struct genpd_onecell_data *data)
{
	unsigned int i;
	int ret = -EINVAL;

	if (!np || !data)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	for (i = 0; i < data->num_domains; i++) {
		if (!data->domains[i])
			continue;
		if (!pm_genpd_present(data->domains[i]))
			goto error;

		data->domains[i]->provider = &np->fwnode;
		data->domains[i]->has_provider = true;
	}

	ret = genpd_add_provider(np, genpd_xlate_onecell, data);
	if (ret < 0)
		goto error;

	mutex_unlock(&gpd_list_lock);

	return 0;

error:
	while (i--) {
		if (!data->domains[i])
			continue;
		data->domains[i]->provider = NULL;
		data->domains[i]->has_provider = false;
	}

	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
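
/*
 * Illustrative sketch (assumption, not from this file): a provider exposing
 * two domains through one device tree node would register like this:
 *
 *	static struct generic_pm_domain *foo_domains[2] = {
 *		&foo_pd_a.genpd, &foo_pd_b.genpd,	// both pm_genpd_init()ed
 *	};
 *	static struct genpd_onecell_data foo_onecell_data = {
 *		.domains = foo_domains,
 *		.num_domains = ARRAY_SIZE(foo_domains),
 *	};
 *
 *	ret = of_genpd_add_provider_onecell(pdev->dev.of_node,
 *					    &foo_onecell_data);
 *
 * with a matching binding in the device tree (names are hypothetical):
 *
 *	power: power-controller@12340000 {
 *		compatible = "foo,power-controller";
 *		reg = <0x12340000 0x1000>;
 *		#power-domain-cells = <1>;
 *	};
 *
 * Consumers then reference a domain by index, e.g.
 * "power-domains = <&power 1>;", which genpd_xlate_onecell() maps to
 * foo_domains[1].
 */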

/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 */
void of_genpd_del_provider(struct device_node *np)
{
	struct of_genpd_provider *cp;
	struct generic_pm_domain *gpd;

	mutex_lock(&gpd_list_lock);
	mutex_lock(&of_genpd_mutex);
	list_for_each_entry(cp, &of_genpd_providers, link) {
		if (cp->node == np) {
			/*
			 * For each PM domain associated with the
			 * provider, set the 'has_provider' to false
			 * so that the PM domain can be safely removed.
			 */
			list_for_each_entry(gpd, &gpd_list, gpd_list_node)
				if (gpd->provider == &np->fwnode)
					gpd->has_provider = false;

			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_genpd_mutex);
	mutex_unlock(&gpd_list_lock);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);

/**
 * genpd_get_from_provider() - Look-up PM domain
 * @genpdspec: OF phandle args to use for look-up
 *
 * Looks for a PM domain provider under the node specified by @genpdspec and
 * if found, uses xlate function of the provider to map phandle args to a PM
 * domain.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
 * on failure.
 */
static struct generic_pm_domain *genpd_get_from_provider(
					struct of_phandle_args *genpdspec)
{
	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
	struct of_genpd_provider *provider;

	if (!genpdspec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&of_genpd_mutex);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_genpd_providers, link) {
		if (provider->node == genpdspec->np)
			genpd = provider->xlate(genpdspec, provider->data);
		if (!IS_ERR(genpd))
			break;
	}

	mutex_unlock(&of_genpd_mutex);

	return genpd;
}

/**
 * of_genpd_add_device() - Add a device to an I/O PM domain
 * @genpdspec: OF phandle args to use for look-up PM domain
 * @dev: Device to be added.
 *
 * Looks-up an I/O PM domain based upon phandle args provided and adds
 * the device to the PM domain. Returns a negative error code on failure.
 */
int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	mutex_lock(&gpd_list_lock);

	genpd = genpd_get_from_provider(genpdspec);
	if (IS_ERR(genpd)) {
		ret = PTR_ERR(genpd);
		goto out;
	}

	ret = genpd_add_device(genpd, dev, NULL);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_device);

/**
 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @parent_spec: OF phandle args to use for parent PM domain look-up
 * @subdomain_spec: OF phandle args to use for subdomain look-up
 *
 * Looks-up a parent PM domain and subdomain based upon phandle args
 * provided and adds the subdomain to the parent PM domain. Returns a
 * negative error code on failure.
 */
int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
			   struct of_phandle_args *subdomain_spec)
{
	struct generic_pm_domain *parent, *subdomain;
	int ret;

	mutex_lock(&gpd_list_lock);

	parent = genpd_get_from_provider(parent_spec);
	if (IS_ERR(parent)) {
		ret = PTR_ERR(parent);
		goto out;
	}

	subdomain = genpd_get_from_provider(subdomain_spec);
	if (IS_ERR(subdomain)) {
		ret = PTR_ERR(subdomain);
		goto out;
	}

	ret = genpd_add_subdomain(parent, subdomain);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);

/**
 * of_genpd_remove_last - Remove the last PM domain registered for a provider
 * @np: Device node pointer associated with the PM domain provider.
 *
 * Find the last PM domain that was added by a particular provider and
 * remove this PM domain from the list of PM domains. The provider is
 * expected to remove these domains in reverse order of addition.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or
 * ERR_PTR() on failure.
 */
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
	struct generic_pm_domain *gpd, *genpd = ERR_PTR(-ENOENT);
	int ret;

	if (IS_ERR_OR_NULL(np))
		return ERR_PTR(-EINVAL);

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (gpd->provider == &np->fwnode) {
			ret = genpd_remove(gpd);
			genpd = ret ? ERR_PTR(ret) : gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}
EXPORT_SYMBOL_GPL(of_genpd_remove_last);

/**
 * genpd_dev_pm_detach - Detach a device from its PM domain.
 * @dev: Device to detach.
 * @power_off: Currently not used
 *
 * Try to locate a corresponding generic PM domain, which the device was
 * attached to previously. If such is found, the device is detached from it.
 */
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret = 0;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	dev_dbg(dev, "removing from PM domain %s\n", pd->name);

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = genpd_remove_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d\n",
			pd->name, ret);
		return;
	}

	/* Check if PM domain can be powered off after removing this device. */
	genpd_queue_power_off_work(pd);
}

static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	genpd_queue_power_off_work(pd);
}

/**
 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
 * @dev: Device to attach.
 *
 * Parse device's OF node to find a PM domain specifier. If such is found,
 * attaches the device to retrieved pm_domain ops.
 *
 * Both generic and legacy Samsung-specific DT bindings are supported to keep
 * backwards compatibility with existing DTBs.
 *
 * Returns 0 on successfully attached PM domain or negative error code. Note
 * that if a power-domain exists for the device, but it cannot be found or
 * turned on, then return -EPROBE_DEFER to ensure that the device is not
 * probed and to re-probe the device at a later point.
 */
int genpd_dev_pm_attach(struct device *dev)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret;

	if (!dev->of_node)
		return -ENODEV;

	if (dev->pm_domain)
		return -EEXIST;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
					"#power-domain-cells", 0, &pd_args);
	if (ret < 0) {
		if (ret != -ENOENT)
			return ret;

		/*
		 * Try legacy Samsung-specific bindings
		 * (for backwards compatibility of DT ABI)
		 */
		pd_args.args_count = 0;
		pd_args.np = of_parse_phandle(dev->of_node,
						"samsung,power-domain", 0);
		if (!pd_args.np)
			return -ENOENT;
	}

	mutex_lock(&gpd_list_lock);
	pd = genpd_get_from_provider(&pd_args);
	of_node_put(pd_args.np);
	if (IS_ERR(pd)) {
		mutex_unlock(&gpd_list_lock);
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		return -EPROBE_DEFER;
	}

	dev_dbg(dev, "adding to PM domain %s\n", pd->name);

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = genpd_add_device(pd, dev, NULL);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}
	mutex_unlock(&gpd_list_lock);

	if (ret < 0) {
		dev_err(dev, "failed to add to PM domain %s: %d\n",
			pd->name, ret);
		goto out;
	}

	dev->pm_domain->detach = genpd_dev_pm_detach;
	dev->pm_domain->sync = genpd_dev_pm_sync;

	mutex_lock(&pd->lock);
	ret = genpd_poweron(pd, 0);
	mutex_unlock(&pd->lock);
out:
	return ret ? -EPROBE_DEFER : 0;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
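
/*
 * Illustrative sketch (assumption, not from this file): a consumer device
 * node referencing a single power domain looks like this in the device tree
 * (labels are hypothetical):
 *
 *	usb@12480000 {
 *		compatible = "foo,usb";
 *		reg = <0x12480000 0x1000>;
 *		power-domains = <&power 0>;
 *	};
 *
 * Bus code (e.g. the platform bus) calls genpd_dev_pm_attach() at probe time
 * via dev_pm_domain_attach(), so drivers normally only see the result: once
 * attached, their runtime PM callbacks are routed through the domain.
 */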
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */


/***        debugfs support        ***/

#ifdef CONFIG_DEBUG_FS
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/kobject.h>
static struct dentry *pm_genpd_debugfs_dir;

/*
 * TODO: This function is a slightly modified version of rtpm_status_show
 * from sysfs.c, so generalize it.
 */
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
	static const char * const status_lookup[] = {
		[RPM_ACTIVE] = "active",
		[RPM_RESUMING] = "resuming",
		[RPM_SUSPENDED] = "suspended",
		[RPM_SUSPENDING] = "suspending"
	};
	const char *p = "";

	if (dev->power.runtime_error)
		p = "error";
	else if (dev->power.disable_depth)
		p = "unsupported";
	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
		p = status_lookup[dev->power.runtime_status];
	else
		WARN_ON(1);

	seq_puts(s, p);
}

static int pm_genpd_summary_one(struct seq_file *s,
				struct generic_pm_domain *genpd)
{
	static const char * const status_lookup[] = {
		[GPD_STATE_ACTIVE] = "on",
		[GPD_STATE_POWER_OFF] = "off"
	};
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	struct gpd_link *link;
	char state[16];
	int ret;

	ret = mutex_lock_interruptible(&genpd->lock);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;
	if (genpd->status == GPD_STATE_POWER_OFF)
		snprintf(state, sizeof(state), "%s-%u",
			 status_lookup[genpd->status], genpd->state_idx);
	else
		snprintf(state, sizeof(state), "%s",
			 status_lookup[genpd->status]);
	seq_printf(s, "%-30s  %-15s ", genpd->name, state);

	/*
	 * Modifications on the list require holding locks on both
	 * master and slave, so we are safe.
	 * Also genpd->name is immutable.
	 */
	list_for_each_entry(link, &genpd->master_links, master_node) {
		seq_printf(s, "%s", link->slave->name);
		if (!list_is_last(&link->master_node, &genpd->master_links))
			seq_puts(s, ", ");
	}

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "\n    %-50s  ", kobj_path);
		rtpm_status_str(s, pm_data->dev);
		kfree(kobj_path);
	}

	seq_puts(s, "\n");
exit:
	mutex_unlock(&genpd->lock);

	return 0;
}

static int pm_genpd_summary_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	seq_puts(s, "domain                          status          slaves\n");
	seq_puts(s, "    /device                                             runtime status\n");
	seq_puts(s, "----------------------------------------------------------------------\n");

	ret = mutex_lock_interruptible(&gpd_list_lock);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
		ret = pm_genpd_summary_one(s, genpd);
		if (ret)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return ret;
}

static int pm_genpd_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, pm_genpd_summary_show, NULL);
}

static const struct file_operations pm_genpd_summary_fops = {
	.open = pm_genpd_summary_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init pm_genpd_debug_init(void)
{
	struct dentry *d;

	pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

	if (!pm_genpd_debugfs_dir)
		return -ENOMEM;

	d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
			pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
	if (!d)
		return -ENOMEM;

	return 0;
}
late_initcall(pm_genpd_debug_init);

static void __exit pm_genpd_debug_exit(void)
{
	debugfs_remove_recursive(pm_genpd_debugfs_dir);
}
__exitcall(pm_genpd_debug_exit);
#endif /* CONFIG_DEBUG_FS */