/*
 * Common code related to device power management domains ("genpd").
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/cpu.h>

#include "power.h"

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};

static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}

static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
				  int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}

static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}

static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->mlock);
}

static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};

static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}

static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
				   int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}

static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}

static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}

static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};

#define genpd_lock(p)			p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			p->lock_ops->unlock(p)

#define genpd_status_on(genpd)		(genpd->status == GPD_STATE_ACTIVE)
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)

static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
		const struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/*
	 * Warn once if an IRQ safe device is attached to a domain whose
	 * callbacks are allowed to sleep: such a domain can never be
	 * powered off. This doesn't matter for an always-on domain.
	 */
	if (ret && !genpd_is_always_on(genpd))
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
			      genpd->name);

	return ret;
}

static int genpd_runtime_suspend(struct device *dev);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	/* A genpd always has its ->runtime_suspend() callback assigned. */
	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
		return pd_to_genpd(dev->pm_domain);

	return NULL;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(const struct generic_pm_domain *genpd,
			  struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(const struct generic_pm_domain *genpd,
			   struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

#ifdef CONFIG_DEBUG_FS
static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
	ktime_t delta, now;

	now = ktime_get();
	delta = ktime_sub(now, genpd->accounting_time);

	/*
	 * If genpd->status is active, it means we are just
	 * out of off and so update the idle time and vice
	 * versa.
	 */
	if (genpd->status == GPD_STATE_ACTIVE) {
		int state_idx = genpd->state_idx;

		genpd->states[state_idx].idle_time =
			ktime_add(genpd->states[state_idx].idle_time, delta);
	} else {
		genpd->on_time = ktime_add(genpd->on_time, delta);
	}

	genpd->accounting_time = now;
}
#else
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif

static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
					   unsigned int state)
{
	struct generic_pm_domain_data *pd_data;
	struct pm_domain_data *pdd;
	struct gpd_link *link;

	/* New requested state is same as Max requested state */
	if (state == genpd->performance_state)
		return state;

	/* New requested state is higher than Max requested state */
	if (state > genpd->performance_state)
		return state;

	/* Traverse all devices within the domain */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		pd_data = to_gpd_data(pdd);

		if (pd_data->performance_state > state)
			state = pd_data->performance_state;
	}

	/*
	 * Traverse all sub-domains within the domain. This can be
	 * done without any additional locking as the link->performance_state
	 * field is protected by the parent genpd->lock, which is already
	 * taken.
	 *
	 * Also note that link->performance_state (the subdomain's performance
	 * state requirement to the parent domain) is different from
	 * link->child->performance_state (the current performance state
	 * requirement of the devices/sub-domains of the subdomain) and so can
	 * have a different value.
	 *
	 * Note that we also take votes from powered-off sub-domains into
	 * account, as the same is done for devices right now.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (link->performance_state > state)
			state = link->performance_state;
	}

	return state;
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth)
{
	struct generic_pm_domain *parent;
	struct gpd_link *link;
	int parent_state, ret;

	if (state == genpd->performance_state)
		return 0;

	/* Propagate to parents of genpd */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		parent = link->parent;

		if (!parent->set_performance_state)
			continue;

		/* Find parent's performance state */
		ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
							 parent->opp_table,
							 state);
		if (unlikely(ret < 0))
			goto err;

		parent_state = ret;

		genpd_lock_nested(parent, depth + 1);

		link->prev_performance_state = link->performance_state;
		link->performance_state = parent_state;
		parent_state = _genpd_reeval_performance_state(parent,
							       parent_state);
		ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
		if (ret)
			link->performance_state = link->prev_performance_state;

		genpd_unlock(parent);

		if (ret)
			goto err;
	}

	ret = genpd->set_performance_state(genpd, state);
	if (ret)
		goto err;

	genpd->performance_state = state;
	return 0;

err:
	/* Encountered an error, lets rollback */
	list_for_each_entry_continue_reverse(link, &genpd->child_links,
					     child_node) {
		parent = link->parent;

		if (!parent->set_performance_state)
			continue;

		genpd_lock_nested(parent, depth + 1);

		parent_state = link->prev_performance_state;
		link->performance_state = parent_state;

		parent_state = _genpd_reeval_performance_state(parent,
							       parent_state);
		if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
			pr_err("%s: Failed to roll back to %d performance state\n",
			       parent->name, parent_state);
		}

		genpd_unlock(parent);
	}

	return ret;
}

/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's
 * power domain.
 * @dev: Device for which the performance state needs to be set.
 * @state: Target performance state of the device. This can be set as 0 when
 *	   the device doesn't have any performance state constraints left
 *	   (and so it doesn't participate anymore in finding the target
 *	   performance state of the domain).
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	unsigned int prev;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (unlikely(!genpd->set_performance_state))
		return -EINVAL;

	if (WARN_ON(!dev->power.subsys_data ||
		    !dev->power.subsys_data->domain_data))
		return -EINVAL;

	genpd_lock(genpd);

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	prev = gpd_data->performance_state;
	gpd_data->performance_state = state;

	state = _genpd_reeval_performance_state(genpd, state);
	ret = _genpd_set_performance_state(genpd, state, 0);
	if (ret)
		gpd_data->performance_state = prev;

	genpd_unlock(genpd);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
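
/*
 * Illustrative sketch (not from this file; the names and the state value
 * are hypothetical): a consumer driver could request a performance level
 * for its device like this. The call aggregates the request with those of
 * the other devices and subdomains in the domain and propagates the result
 * upwards through _genpd_set_performance_state():
 *
 *	static int foo_set_fast_mode(struct device *dev)
 *	{
 *		int ret;
 *
 *		ret = dev_pm_genpd_set_performance_state(dev, 3);
 *		if (ret)
 *			dev_err(dev, "perf state request failed: %d\n", ret);
 *		return ret;
 *	}
 *
 * Passing 0 drops the device's constraint again.
 */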

static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_on)
		return 0;

	if (!timed)
		return genpd->power_on(genpd);

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		return ret;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

	return ret;
}

static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_off)
		return 0;

	if (!timed)
		return genpd->power_off(genpd);

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		return 0;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

	return 0;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback,
 *	the RPM status of the related device is in an intermediate state, not
 *	yet turned off and so genpd's poweroff should be postponed.
 * @depth: nesting count for lockdep.
 *
 * If all of the @genpd's devices have been suspended and all of its
 * subdomains have been powered down, remove power from @genpd.
 */
static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			   unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
		return 0;

	/*
	 * Abort power off for the PM domain in the following situations:
	 * (1) The domain is configured as always on.
	 * (2) When the domain has a subdomain being powered on.
	 */
	if (genpd_is_always_on(genpd) ||
	    genpd_is_rpm_always_on(genpd) ||
	    atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		/*
		 * Do not allow a PM domain to be powered off, when an IRQ
		 * safe device is part of a non-IRQ safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
		    irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	/* Default to shallowest state. */
	if (!genpd->gov)
		genpd->state_idx = 0;

	if (genpd->power_off) {
		int ret;

		if (atomic_read(&genpd->sd_count) > 0)
			return -EBUSY;

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call genpd_power_on() for the parent yet after
		 * incrementing it.  In that case genpd_power_on() will wait
		 * for us to drop the lock, so we can call .power_off() and
		 * let genpd_power_on() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = _genpd_power_off(genpd, true);
		if (ret)
			return ret;
	}

	genpd->status = GPD_STATE_POWER_OFF;
	genpd_update_accounting(genpd);

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return 0;
}

/**
 * genpd_power_on - Restore power to a given PM domain and its parents.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its parents so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd_status_on(genpd))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the parents' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_sd_counter_inc(parent);

		genpd_lock_nested(parent, depth + 1);
		ret = genpd_power_on(parent, depth + 1);
		genpd_unlock(parent);

		if (ret) {
			genpd_sd_counter_dec(parent);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GPD_STATE_ACTIVE;
	genpd_update_accounting(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link,
					     &genpd->child_links,
					     child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return ret;
}

static int genpd_dev_pm_start(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);

	return genpd_start_dev(genpd, dev);
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_lock(genpd);
	genpd_power_off(genpd, false, 0);
	genpd_unlock(genpd);
}

/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O
 * devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	time_start = 0;
	if (runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine may be run with
	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
	 */
	if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
		return 0;

	genpd_lock(genpd);
	genpd_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O
 * devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;
	bool timed = true;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * As we don't power off a non IRQ safe domain, which holds
	 * an IRQ safe device, we don't need to restore power to it.
	 */
	if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
		timed = false;
		goto out;
	}

	genpd_lock(genpd);
	ret = genpd_power_on(genpd, 0);
	genpd_unlock(genpd);

	if (ret)
		return ret;

 out:
	/* Measure resume latency. */
	time_start = 0;
	if (timed && runtime_pm)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

err_stop:
	genpd_stop_dev(genpd, dev);
err_poweroff:
	if (!pm_runtime_is_irq_safe(dev) ||
	    (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
		genpd_lock(genpd);
		genpd_power_off(genpd, true, 0);
		genpd_unlock(genpd);
	}

	return ret;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

/**
 * genpd_power_off_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_power_off_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall(genpd_power_off_unused);
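
/*
 * Usage note: unused domains are powered off here at late_initcall time.
 * For board bring-up or debugging this can be suppressed by booting with
 * "pd_ignore_unused" on the kernel command line, which sets
 * pd_ignore_unused via the __setup() hook above.
 */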

#ifdef CONFIG_PM_SLEEP

/**
 * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
 * @genpd: PM domain to power off, if possible.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its
 * parents.
 *
 * This function is only called in "noirq" and "syscore" stages of system
 * power transitions. The "noirq" callbacks may be executed asynchronously,
 * thus in these cases the lock must be held.
 */
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
				 unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	if (_genpd_power_off(genpd, false))
		return;

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_off(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}
}

/**
 * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
 * @genpd: PM domain to power on.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * This function is only called in "noirq" and "syscore" stages of system
 * power transitions. The "noirq" callbacks may be executed asynchronously,
 * thus in these cases the lock must be held.
 */
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
				unsigned int depth)
{
	struct gpd_link *link;

	if (genpd_status_on(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_inc(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_on(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}

	_genpd_power_on(genpd, false);

	genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from
 * sleep states should be resumed by genpd_prepare(): (1) if the device is
 * enabled to wake up the system and it has to remain active for this purpose
 * while the system is in the sleep state and (2) if the device is not
 * enabled to wake up the system from sleep states and it generally doesn't
 * generate wakeup signals by itself (those signals are generated on its
 * behalf by other parts of the system).  In the latter case it may be
 * necessary to reconfigure the device's wakeup settings during system
 * suspend, because it may have been set up to signal remote wakeup from the
 * system's working state as needed by runtime PM.  Return 'true' in either
 * of the above cases.
 */
static bool resume_needed(struct device *dev,
			  const struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_is_active_wakeup(genpd);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken
	 * up at this point and a system wakeup event should be reported if
	 * it's set up to wake up the system from sleep states.
	 */
	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_lock(genpd);

	if (genpd->prepared_count++ == 0)
		genpd->suspended_count = 0;

	genpd_unlock(genpd);

	ret = pm_generic_prepare(dev);
	if (ret < 0) {
		genpd_lock(genpd);

		genpd->prepared_count--;

		genpd_unlock(genpd);
	}

	/* Never return 1, as genpd don't cope with the direct_complete path. */
	return ret >= 0 ? 0 : ret;
}

/**
 * genpd_finish_suspend - Completion of suspend or hibernation of device in
 *   an I/O PM domain.
 * @dev: Device to suspend.
 * @poweroff: Specifies whether this function is being called during
 *	hibernation.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_finish_suspend(struct device *dev, bool poweroff)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (poweroff)
		ret = pm_generic_poweroff_noirq(dev);
	else
		ret = pm_generic_suspend_noirq(dev);
	if (ret)
		return ret;

	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
		return 0;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_stop_dev(genpd, dev);
		if (ret) {
			if (poweroff)
				pm_generic_restore_noirq(dev);
			else
				pm_generic_resume_noirq(dev);
			return ret;
		}
	}

	genpd_lock(genpd);
	genpd->suspended_count++;
	genpd_sync_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_suspend_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, false);
}

/**
 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the
 * device.
 */
static int genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
		return pm_generic_resume_noirq(dev);

	genpd_lock(genpd);
	genpd_sync_power_on(genpd, true, 0);
	genpd->suspended_count--;
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_resume_noirq(dev);
}

/**
 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int genpd_freeze_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	ret = pm_generic_freeze_noirq(dev);
	if (ret)
		return ret;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev))
		ret = genpd_stop_dev(genpd, dev);

	return ret;
}

/**
 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int genpd_thaw_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_thaw_noirq(dev);
}

/**
 * genpd_poweroff_noirq - Completion of hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to poweroff.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_poweroff_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, true);
}

/**
 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	genpd_lock(genpd);
	if (genpd->suspended_count++ == 0)
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to genpd_sync_power_on(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;

	genpd_sync_power_on(genpd, true, 0);
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_restore_noirq(dev);
}

/**
 * genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	pm_generic_complete(dev);

	genpd_lock(genpd);

	genpd->prepared_count--;
	if (!genpd->prepared_count)
		genpd_queue_power_off_work(genpd);

	genpd_unlock(genpd);
}

/**
 * genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: Whether power is being removed (true) or restored (false).
 *
 * This routine may only be called during the system core (syscore) suspend
 * or resume phase for devices whose "always on" flags are set.
 */
static void genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	if (suspend) {
		genpd->suspended_count++;
		genpd_sync_power_off(genpd, false, 0);
	} else {
		genpd_sync_power_on(genpd, false, 0);
		genpd->suspended_count--;
	}
}

void pm_genpd_syscore_poweroff(struct device *dev)
{
	genpd_syscore_switch(dev, true);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);

void pm_genpd_syscore_poweron(struct device *dev)
{
	genpd_syscore_switch(dev, false);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);

#else /* !CONFIG_PM_SLEEP */

#define genpd_prepare		NULL
#define genpd_suspend_noirq	NULL
#define genpd_resume_noirq	NULL
#define genpd_freeze_noirq	NULL
#define genpd_thaw_noirq	NULL
#define genpd_poweroff_noirq	NULL
#define genpd_restore_noirq	NULL
#define genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	gpd_data->base.dev = dev;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		ret = -EINVAL;
		goto err_free;
	}

	dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	return gpd_data;

 err_free:
	spin_unlock_irq(&dev->power.lock);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

static void genpd_update_cpumask(struct generic_pm_domain *genpd,
				 int cpu, bool set, unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_is_cpu_domain(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_lock_nested(parent, depth + 1);
		genpd_update_cpumask(parent, cpu, set, depth + 1);
		genpd_unlock(parent);
	}

	if (set)
		cpumask_set_cpu(cpu, genpd->cpus);
	else
		cpumask_clear_cpu(cpu, genpd->cpus);
}

static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, true, 0);
}

static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, false, 0);
}

static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
{
	int cpu;

	if (!genpd_is_cpu_domain(genpd))
		return -1;

	for_each_possible_cpu(cpu) {
		if (get_cpu_device(cpu) == dev)
			return cpu;
	}

	return -1;
}

static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct device *base_dev)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd_lock(genpd);

	genpd_set_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, &genpd->domain);

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

	genpd_unlock(genpd);
 out:
	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
					DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_device(genpd, dev, dev);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_device);
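
/*
 * Illustrative sketch (hypothetical names): a SoC driver that initialized a
 * domain with pm_genpd_init() can attach a device to it directly, without a
 * firmware description:
 *
 *	ret = pm_genpd_add_device(&foo_pd, &pdev->dev);
 *	if (ret)
 *		return ret;
 *
 * From then on the device's runtime PM callbacks are routed through the
 * domain; pm_genpd_remove_device(&pdev->dev) undoes the attachment.
 */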

static int genpd_remove_device(struct generic_pm_domain *genpd,
			       struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
				   DEV_PM_QOS_RESUME_LATENCY);

	genpd_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	genpd_clear_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, NULL);

	list_del_init(&pdd->list_node);

	genpd_unlock(genpd);

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_unlock(genpd);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);

	if (!genpd)
		return -EINVAL;

	return genpd_remove_device(genpd, dev);
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);

static int genpd_add_subdomain(struct generic_pm_domain *genpd,
			       struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	/*
	 * If the domain can be powered on/off in an IRQ safe
	 * context, ensure that the parent can also be powered on/off
	 * in that context.
	 */
	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
		     genpd->name, subdomain->name);
		return -EINVAL;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
		if (itr->child == subdomain && itr->parent == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->parent = genpd;
	list_add_tail(&link->parent_node, &genpd->parent_links);
	link->child = subdomain;
	list_add_tail(&link->child_node, &subdomain->child_links);
	if (genpd_status_on(subdomain))
		genpd_sd_counter_inc(genpd);

 out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);
	if (ret)
		kfree(link);
	return ret;
}

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Parent PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_subdomain(genpd, subdomain);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
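
/*
 * Illustrative sketch (hypothetical names): building a two-level hierarchy.
 * Powering on "foo_gpu_pd" forces its parent "foo_top_pd" on first, and the
 * sd_count mechanics above keep the parent on while any subdomain is on:
 *
 *	ret = pm_genpd_add_subdomain(&foo_top_pd, &foo_gpu_pd);
 *	if (ret)
 *		return ret;
 *
 * pm_genpd_remove_subdomain() below severs the link again, provided the
 * subdomain itself has no subdomains or devices left.
 */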

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Parent PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *l, *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
		pr_warn("%s: unable to remove subdomain %s\n",
			genpd->name, subdomain->name);
		ret = -EBUSY;
		goto out;
	}

	list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
		if (link->child != subdomain)
			continue;

		list_del(&link->parent_node);
		list_del(&link->child_node);
		kfree(link);
		if (genpd_status_on(subdomain))
			genpd_sd_counter_dec(genpd);

		ret = 0;
		break;
	}

out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);

static void genpd_free_default_power_state(struct genpd_power_state *states,
					   unsigned int state_count)
{
	kfree(states);
}

static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
{
	struct genpd_power_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	genpd->states = state;
	genpd->state_count = 1;
	genpd->free_states = genpd_free_default_power_state;

	return 0;
}

static void genpd_lock_init(struct generic_pm_domain *genpd)
{
	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
		spin_lock_init(&genpd->slock);
		genpd->lock_ops = &genpd_spin_ops;
	} else {
		mutex_init(&genpd->mlock);
		genpd->lock_ops = &genpd_mtx_ops;
	}
}

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's power_is_off field.
 *
 * Returns 0 on successful initialization, else a negative error code.
 */
int pm_genpd_init(struct generic_pm_domain *genpd,
		  struct dev_power_governor *gov, bool is_off)
{
	int ret;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	INIT_LIST_HEAD(&genpd->parent_links);
	INIT_LIST_HEAD(&genpd->child_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	genpd_lock_init(genpd);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->provider = NULL;
	genpd->has_provider = false;
	genpd->accounting_time = ktime_get();
	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
	genpd->domain.ops.prepare = genpd_prepare;
	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
	genpd->domain.ops.complete = genpd_complete;
	genpd->domain.start = genpd_dev_pm_start;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	/* Always-on domains must be powered on at initialization. */
	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
	    !genpd_status_on(genpd))
		return -EINVAL;

	if (genpd_is_cpu_domain(genpd) &&
	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
		return -ENOMEM;

	/* Use only one "off" state if there were no states declared */
	if (genpd->state_count == 0) {
		ret = genpd_set_default_power_state(genpd);
		if (ret) {
			if (genpd_is_cpu_domain(genpd))
				free_cpumask_var(genpd->cpus);
			return ret;
		}
	} else if (!gov && genpd->state_count > 1) {
		pr_warn("%s: no governor for states\n", genpd->name);
	}

	device_initialize(&genpd->dev);
	dev_set_name(&genpd->dev, "%s", genpd->name);

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_genpd_init);
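
/*
 * Illustrative sketch (hypothetical names and register layout): the minimum
 * a power-controller driver needs before calling pm_genpd_init() is a
 * struct generic_pm_domain with ->name and the ->power_on/->power_off
 * callbacks filled in:
 *
 *	static int foo_pd_power_on(struct generic_pm_domain *pd)
 *	{
 *		struct foo_pd *foo = container_of(pd, struct foo_pd, genpd);
 *
 *		writel(FOO_PWR_ON, foo->base + FOO_PWR_CTRL);
 *		return 0;
 *	}
 *
 *	foo->genpd.name = "foo";
 *	foo->genpd.power_on = foo_pd_power_on;
 *	foo->genpd.power_off = foo_pd_power_off;
 *	ret = pm_genpd_init(&foo->genpd, NULL, true);
 *
 * Passing true for @is_off tells genpd the hardware starts powered down; a
 * NULL @gov means no governor is consulted, so the domain is powered off as
 * soon as it becomes unused.
 */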

static int genpd_remove(struct generic_pm_domain *genpd)
{
	struct gpd_link *l, *link;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_lock(genpd);

	if (genpd->has_provider) {
		genpd_unlock(genpd);
		pr_err("Provider present, unable to remove %s\n", genpd->name);
		return -EBUSY;
	}

	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
		genpd_unlock(genpd);
		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
		return -EBUSY;
	}

	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
		list_del(&link->parent_node);
		list_del(&link->child_node);
		kfree(link);
	}

	list_del(&genpd->gpd_list_node);
	genpd_unlock(genpd);
	cancel_work_sync(&genpd->power_off_work);
	if (genpd_is_cpu_domain(genpd))
		free_cpumask_var(genpd->cpus);
	if (genpd->free_states)
		genpd->free_states(genpd->states, genpd->state_count);

	pr_debug("%s: removed %s\n", __func__, genpd->name);

	return 0;
}

/**
 * pm_genpd_remove - Remove a generic I/O PM domain
 * @genpd: Pointer to PM domain that is to be removed.
 *
 * To remove the PM domain, this function:
 *  - Removes the PM domain as a subdomain to any parent domains,
 *    if it was added.
 *  - Removes the PM domain from the list of registered PM domains.
 *
 * The PM domain will only be removed if the associated provider has
 * been removed, it is not a parent to any other PM domain and has no
 * devices associated with it.
 */
int pm_genpd_remove(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_remove(genpd);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove);

#ifdef CONFIG_PM_GENERIC_DOMAINS_OF

/*
 * Device Tree based PM domain providers.
 *
 * The code below implements generic device tree based PM domain providers
 * that bind device tree nodes with generic PM domains registered in this
 * driver.
 *
 * Any driver that registers generic PM domains and needs to support binding
 * of devices to these domains is supposed to register a PM domain provider,
 * which maps a PM domain specifier retrieved from the device tree to a PM
 * domain.
 *
 * Two simple mapping functions have been provided for convenience:
 *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
 *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
 *    index.
 */

/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *         into a PM domain
 * @data: context pointer to be passed into @xlate callback
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};

/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);

/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);

/**
 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of the xlate function
 * needs to be a valid pointer to struct generic_pm_domain.
 */
static struct generic_pm_domain *genpd_xlate_simple(
					struct of_phandle_args *genpdspec,
					void *data)
{
	return data;
}

/**
 * genpd_xlate_onecell() - Xlate function using a single index.
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct genpd_onecell_data
 *
 * This is a generic xlate function that can be used to model simple PM
 * domain controllers that have one device tree node and provide multiple PM
 * domains. A single cell is used as an index into an array of PM domains
 * specified in the genpd_onecell_data struct when registering the provider.
 */
static struct generic_pm_domain *genpd_xlate_onecell(
					struct of_phandle_args *genpdspec,
					void *data)
{
	struct genpd_onecell_data *genpd_data = data;
	unsigned int idx = genpdspec->args[0];

	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	if (idx >= genpd_data->num_domains) {
		pr_err("%s: invalid domain index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	if (!genpd_data->domains[idx])
		return ERR_PTR(-ENOENT);

	return genpd_data->domains[idx];
}

/**
 * genpd_add_provider() - Register a PM domain provider for a node
 * @np: Device node pointer associated with the PM domain provider.
 * @xlate: Callback for decoding PM domain from phandle arguments.
 * @data: Context pointer for @xlate callback.
 */
static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
			      void *data)
{
	struct of_genpd_provider *cp;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->xlate = xlate;

	mutex_lock(&of_genpd_mutex);
	list_add(&cp->link, &of_genpd_providers);
	mutex_unlock(&of_genpd_mutex);
	pr_debug("Added domain provider from %pOF\n", np);

	return 0;
}

static bool genpd_present(const struct generic_pm_domain *genpd)
{
	const struct generic_pm_domain *gpd;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;
	return false;
}

/**
 * of_genpd_add_provider_simple() - Register a simple PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @genpd: Pointer to PM domain associated with the PM domain provider.
 */
int of_genpd_add_provider_simple(struct device_node *np,
				 struct generic_pm_domain *genpd)
{
	int ret = -EINVAL;

	if (!np || !genpd)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	if (!genpd_present(genpd))
		goto unlock;

	genpd->dev.of_node = np;

	/* Parse genpd OPP table */
	if (genpd->set_performance_state) {
		ret = dev_pm_opp_of_add_table(&genpd->dev);
		if (ret) {
			dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
				ret);
			goto unlock;
		}

		/*
		 * Save table for faster processing while setting performance
		 * state.
		 */
		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
		WARN_ON(!genpd->opp_table);
	}

	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
	if (ret) {
		if (genpd->set_performance_state) {
			dev_pm_opp_put_opp_table(genpd->opp_table);
			dev_pm_opp_of_remove_table(&genpd->dev);
		}

		goto unlock;
	}

	genpd->provider = &np->fwnode;
	genpd->has_provider = true;

unlock:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
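
/*
 * Illustrative sketch (hypothetical names): a DT-based provider with a
 * single domain typically registers right after pm_genpd_init(), using its
 * own device node:
 *
 *	ret = pm_genpd_init(&foo->genpd, NULL, true);
 *	if (ret)
 *		return ret;
 *
 *	ret = of_genpd_add_provider_simple(pdev->dev.of_node, &foo->genpd);
 *
 * Consumers then reference the provider with a
 * "power-domains = <&foo_pd>;" phandle (#power-domain-cells = <0>), which
 * genpd_xlate_simple() above resolves straight to the registered domain.
 */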

/**
 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @data: Pointer to the data associated with the PM domain provider.
 */
int of_genpd_add_provider_onecell(struct device_node *np,
				  struct genpd_onecell_data *data)
{
	struct generic_pm_domain *genpd;
	unsigned int i;
	int ret = -EINVAL;

	if (!np || !data)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	if (!data->xlate)
		data->xlate = genpd_xlate_onecell;

	for (i = 0; i < data->num_domains; i++) {
		genpd = data->domains[i];

		if (!genpd)
			continue;
		if (!genpd_present(genpd))
			goto error;

		genpd->dev.of_node = np;

		/* Parse genpd OPP table */
		if (genpd->set_performance_state) {
			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
			if (ret) {
				dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
					i, ret);
				goto error;
			}

			/*
			 * Save table for faster processing while setting
			 * performance state.
			 */
			genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
			WARN_ON(!genpd->opp_table);
		}

		genpd->provider = &np->fwnode;
		genpd->has_provider = true;
	}

	ret = genpd_add_provider(np, data->xlate, data);
	if (ret < 0)
		goto error;

	mutex_unlock(&gpd_list_lock);

	return 0;

error:
	while (i--) {
		genpd = data->domains[i];

		if (!genpd)
			continue;

		genpd->provider = NULL;
		genpd->has_provider = false;

		if (genpd->set_performance_state) {
			dev_pm_opp_put_opp_table(genpd->opp_table);
			dev_pm_opp_of_remove_table(&genpd->dev);
		}
	}

	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);

/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 */
void of_genpd_del_provider(struct device_node *np)
{
	struct of_genpd_provider *cp, *tmp;
	struct generic_pm_domain *gpd;

	mutex_lock(&gpd_list_lock);
	mutex_lock(&of_genpd_mutex);
	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
		if (cp->node == np) {
			/*
			 * For each PM domain associated with the
			 * provider, set the 'has_provider' to false
			 * so that the PM domain can be safely removed.
			 */
			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
				if (gpd->provider == &np->fwnode) {
					gpd->has_provider = false;

					if (!gpd->set_performance_state)
						continue;

					dev_pm_opp_put_opp_table(gpd->opp_table);
					dev_pm_opp_of_remove_table(&gpd->dev);
				}
			}

			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_genpd_mutex);
	mutex_unlock(&gpd_list_lock);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);

/**
 * genpd_get_from_provider() - Look-up PM domain
 * @genpdspec: OF phandle args to use for look-up
 *
 * Looks for a PM domain provider under the node specified by @genpdspec and
 * if found, uses the xlate function of the provider to map phandle args to
 * a PM domain.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or
 * ERR_PTR() on failure.
 */
static struct generic_pm_domain *genpd_get_from_provider(
					struct of_phandle_args *genpdspec)
{
	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
	struct of_genpd_provider *provider;

	if (!genpdspec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&of_genpd_mutex);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_genpd_providers, link) {
		if (provider->node == genpdspec->np)
			genpd = provider->xlate(genpdspec, provider->data);
		if (!IS_ERR(genpd))
			break;
	}

	mutex_unlock(&of_genpd_mutex);

	return genpd;
}

/**
 * of_genpd_add_device() - Add a device to an I/O PM domain
 * @genpdspec: OF phandle args to use for look-up PM domain
 * @dev: Device to be added.
 *
 * Looks-up an I/O PM domain based upon phandle args provided and adds
 * the device to the PM domain. Returns a negative error code on failure.
 */
int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	mutex_lock(&gpd_list_lock);

	genpd = genpd_get_from_provider(genpdspec);
	if (IS_ERR(genpd)) {
		ret = PTR_ERR(genpd);
		goto out;
	}

	ret = genpd_add_device(genpd, dev, dev);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_device);

/**
 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @parent_spec: OF phandle args to use for parent PM domain look-up
 * @subdomain_spec: OF phandle args to use for subdomain look-up
 *
 * Looks-up a parent PM domain and subdomain based upon phandle args
 * provided and adds the subdomain to the parent PM domain. Returns a
 * negative error code on failure.
 */
int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
			   struct of_phandle_args *subdomain_spec)
{
	struct generic_pm_domain *parent, *subdomain;
	int ret;

	mutex_lock(&gpd_list_lock);

	parent = genpd_get_from_provider(parent_spec);
	if (IS_ERR(parent)) {
		ret = PTR_ERR(parent);
		goto out;
	}

	subdomain = genpd_get_from_provider(subdomain_spec);
	if (IS_ERR(subdomain)) {
		ret = PTR_ERR(subdomain);
		goto out;
	}

	ret = genpd_add_subdomain(parent, subdomain);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);

/**
 * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @parent_spec: OF phandle args to use for parent PM domain look-up
 * @subdomain_spec: OF phandle args to use for subdomain look-up
 *
 * Looks-up a parent PM domain and subdomain based upon phandle args
 * provided and removes the subdomain from the parent PM domain. Returns a
 * negative error code on failure.
 */
int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
			      struct of_phandle_args *subdomain_spec)
{
	struct generic_pm_domain *parent, *subdomain;
	int ret;

	mutex_lock(&gpd_list_lock);

	parent = genpd_get_from_provider(parent_spec);
	if (IS_ERR(parent)) {
		ret = PTR_ERR(parent);
		goto out;
	}

	subdomain = genpd_get_from_provider(subdomain_spec);
	if (IS_ERR(subdomain)) {
		ret = PTR_ERR(subdomain);
		goto out;
	}

	ret = pm_genpd_remove_subdomain(parent, subdomain);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);

/**
 * of_genpd_remove_last - Remove the last PM domain registered for a provider
 * @np: Pointer to device node associated with provider
 *
 * Find the last PM domain that was added by a particular provider and
 * remove this PM domain from the list of PM domains. The provider is
 * expected to remove these domains in reverse order of addition.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or
 * ERR_PTR() on failure.
 */
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
	int ret;

	if (IS_ERR_OR_NULL(np))
		return ERR_PTR(-EINVAL);

	mutex_lock(&gpd_list_lock);
	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
		if (gpd->provider == &np->fwnode) {
			ret = genpd_remove(gpd);
			genpd = ret ? ERR_PTR(ret) : gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}
EXPORT_SYMBOL_GPL(of_genpd_remove_last);

static void genpd_release_dev(struct device *dev)
{
	of_node_put(dev->of_node);
	kfree(dev);
}

static struct bus_type genpd_bus_type = {
	.name		= "genpd",
};

/**
 * genpd_dev_pm_detach - Detach a device from its PM domain.
 * @dev: Device to detach.
 * @power_off: Currently not used
 *
 * Try to locate a corresponding generic PM domain, which the device was
 * attached to previously. If such is found, the device is detached from it.
 */
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret = 0;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	dev_dbg(dev, "removing from PM domain %s\n", pd->name);

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = genpd_remove_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d",
			pd->name, ret);
		return;
	}

	/* Check if PM domain can be powered off after removing this device. */
	genpd_queue_power_off_work(pd);

	/* Unregister the device if it was created by genpd. */
	if (dev->bus == &genpd_bus_type)
		device_unregister(dev);
}

static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	genpd_queue_power_off_work(pd);
}

static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
				 unsigned int index, bool power_on)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	int ret;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
				"#power-domain-cells", index, &pd_args);
	if (ret < 0)
		return ret;

	mutex_lock(&gpd_list_lock);
	pd = genpd_get_from_provider(&pd_args);
	of_node_put(pd_args.np);
	if (IS_ERR(pd)) {
		mutex_unlock(&gpd_list_lock);
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		return driver_deferred_probe_check_state(base_dev);
	}

	dev_dbg(dev, "adding to PM domain %s\n", pd->name);

	ret = genpd_add_device(pd, dev, base_dev);
	mutex_unlock(&gpd_list_lock);

	if (ret < 0) {
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to add to PM domain %s: %d",
				pd->name, ret);
		return ret;
	}

	dev->pm_domain->detach = genpd_dev_pm_detach;
	dev->pm_domain->sync = genpd_dev_pm_sync;

	if (power_on) {
		genpd_lock(pd);
		ret = genpd_power_on(pd, 0);
		genpd_unlock(pd);
	}

	if (ret)
		genpd_remove_device(pd, dev);

	return ret ? -EPROBE_DEFER : 1;
}

/**
 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
 * @dev: Device to attach.
 *
 * Parse device's OF node to find a PM domain specifier. If such is found,
 * attaches the device to retrieved pm_domain ops.
 *
 * Returns 1 on successfully attached PM domain, 0 when the device don't
 * need a PM domain or when multiple power-domains exists for it, else a
 * negative error code. Note that if a power-domain exists for the device,
 * but it cannot be found or turned on, then return -EPROBE_DEFER to ensure
 * that the device is not probed and to re-probe the device at a later point.
 */
int genpd_dev_pm_attach(struct device *dev)
{
	if (!dev->of_node)
		return 0;

	/*
	 * Devices with multiple PM domains must be attached separately, as we
	 * can only attach one PM domain per device.
	 */
	if (of_count_phandle_with_args(dev->of_node, "power-domains",
				       "#power-domain-cells") != 1)
		return 0;

	return __genpd_dev_pm_attach(dev, dev, 0, true);
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);

/**
 * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
 * @dev: The device used to lookup the PM domain.
 * @index: The index of the PM domain.
 *
 * Parse device's OF node to find a PM domain specifier at the provided
 * @index. If such is found, creates a virtual device and attaches it to the
 * retrieved pm_domain ops. To deal with detaching of the virtual device,
 * the ->detach() callback in the struct dev_pm_domain is assigned to
 * genpd_dev_pm_detach().
 *
 * Returns the created virtual device if successfully attached PM domain,
 * NULL when the device don't need a PM domain, else an ERR_PTR() in case of
 * failures. If a power-domain exists for the device, but cannot be found or
 * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the
 * device is not probed and to re-probe the device at a later point.
 */
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
					 unsigned int index)
{
	struct device *virt_dev;
	int num_domains;
	int ret;

	if (!dev->of_node)
		return NULL;

	/* Verify that the index is within a valid range. */
	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
						 "#power-domain-cells");
	if (index >= num_domains)
		return NULL;

	/* Allocate and register device on the genpd bus. */
	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
	if (!virt_dev)
		return ERR_PTR(-ENOMEM);

	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
	virt_dev->bus = &genpd_bus_type;
	virt_dev->release = genpd_release_dev;
	virt_dev->of_node = of_node_get(dev->of_node);

	ret = device_register(virt_dev);
	if (ret) {
		put_device(virt_dev);
		return ERR_PTR(ret);
	}

	/* Try to attach the device to the PM domain at the specified index. */
	ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
	if (ret < 1) {
		device_unregister(virt_dev);
		return ret ? ERR_PTR(ret) : NULL;
	}

	pm_runtime_enable(virt_dev);
	genpd_queue_power_off_work(dev_to_genpd(virt_dev));

	return virt_dev;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);

/**
 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
 * @dev: The device used to lookup the PM domain.
 * @name: The name of the PM domain.
 *
 * Parse device's OF node to find a PM domain specifier using the
 * power-domain-names DT property. For further description see
 * genpd_dev_pm_attach_by_id().
 */
struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
{
	int index;

	if (!dev->of_node)
		return NULL;

	index = of_property_match_string(dev->of_node, "power-domain-names",
					 name);
	if (index < 0)
		return NULL;

	return genpd_dev_pm_attach_by_id(dev, index);
}
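
/*
 * Illustrative sketch (hypothetical names): a consumer whose DT node lists
 * several domains, e.g.
 *
 *	power-domains = <&foo_pd 1>, <&foo_pd 2>;
 *	power-domain-names = "core", "mem";
 *
 * can attach them individually via virtual devices and control each one
 * with runtime PM:
 *
 *	struct device *core_dev;
 *
 *	core_dev = genpd_dev_pm_attach_by_name(dev, "core");
 *	if (IS_ERR_OR_NULL(core_dev))
 *		return core_dev ? PTR_ERR(core_dev) : -ENODEV;
 *
 *	pm_runtime_get_sync(core_dev);	// powers on only the "core" domain
 *
 * Each returned virtual device sits on the internal genpd bus and is torn
 * down again through the ->detach hook set in __genpd_dev_pm_attach().
 */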
2606
2607static const struct of_device_id idle_state_match[] = {
2608 { .compatible = "domain-idle-state", },
2609 { }
2610};
2611
2612static int genpd_parse_state(struct genpd_power_state *genpd_state,
2613 struct device_node *state_node)
2614{
2615 int err;
2616 u32 residency;
2617 u32 entry_latency, exit_latency;
2618
2619 err = of_property_read_u32(state_node, "entry-latency-us",
2620 &entry_latency);
2621 if (err) {
2622 pr_debug(" * %pOF missing entry-latency-us property\n",
2623 state_node);
2624 return -EINVAL;
2625 }
2626
2627 err = of_property_read_u32(state_node, "exit-latency-us",
2628 &exit_latency);
2629 if (err) {
2630 pr_debug(" * %pOF missing exit-latency-us property\n",
2631 state_node);
2632 return -EINVAL;
2633 }
2634
2635 err = of_property_read_u32(state_node, "min-residency-us", &residency);
2636 if (!err)
2637 genpd_state->residency_ns = 1000 * residency;
2638
2639 genpd_state->power_on_latency_ns = 1000 * exit_latency;
2640 genpd_state->power_off_latency_ns = 1000 * entry_latency;
2641 genpd_state->fwnode = &state_node->fwnode;
2642
2643 return 0;
2644}
2645
2646static int genpd_iterate_idle_states(struct device_node *dn,
2647 struct genpd_power_state *states)
2648{
2649 int ret;
2650 struct of_phandle_iterator it;
2651 struct device_node *np;
2652 int i = 0;
2653
2654 ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
2655 if (ret <= 0)
2656 return ret == -ENOENT ? 0 : ret;
2657
2658
2659 of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
2660 np = it.node;
2661 if (!of_match_node(idle_state_match, np))
2662 continue;
2663 if (states) {
2664 ret = genpd_parse_state(&states[i], np);
2665 if (ret) {
2666 pr_err("Parsing idle state node %pOF failed with err %d\n",
2667 np, ret);
2668 of_node_put(np);
2669 return ret;
2670 }
2671 }
2672 i++;
2673 }
2674
2675 return i;
2676}
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
int of_genpd_parse_idle_states(struct device_node *dn,
			       struct genpd_power_state **states, int *n)
{
	struct genpd_power_state *st;
	int ret;

	ret = genpd_iterate_idle_states(dn, NULL);
	if (ret < 0)
		return ret;

	if (!ret) {
		*states = NULL;
		*n = 0;
		return 0;
	}

	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	ret = genpd_iterate_idle_states(dn, st);
	if (ret <= 0) {
		kfree(st);
		return ret < 0 ? ret : -EINVAL;
	}

	*states = st;
	*n = ret;

	return 0;
}
EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
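
/*
 * A (hypothetical) genpd provider would typically use this as:
 *
 *	struct genpd_power_state *states;
 *	int nr_states, ret;
 *
 *	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *	if (ret)
 *		return ret;
 *	my_genpd.states = states;
 *	my_genpd.state_count = nr_states;
 *
 * before registering the domain with pm_genpd_init().
 */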

/**
 * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
 *
 * @genpd_dev: Genpd's device for which the performance state needs to be found.
 * @opp: struct dev_pm_opp of the OPP for which we need to find the
 *	performance state.
 *
 * Returns performance state encoded in the OPP of the genpd. This calls
 * the platform specific genpd->opp_to_performance_state() callback to
 * translate a power domain OPP to a performance state.
 *
 * Returns the performance state on success and 0 on failure.
 */
unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
					       struct dev_pm_opp *opp)
{
	struct generic_pm_domain *genpd = NULL;
	int state;

	genpd = container_of(genpd_dev, struct generic_pm_domain, dev);

	if (unlikely(!genpd->opp_to_performance_state))
		return 0;

	genpd_lock(genpd);
	state = genpd->opp_to_performance_state(genpd, opp);
	genpd_unlock(genpd);

	return state;
}
EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
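
/*
 * Providers hook up the translation by setting the callback before
 * registering the domain, e.g. (hypothetical callback name):
 *
 *	my_genpd.opp_to_performance_state = my_opp_to_perf_state;
 *
 * where the callback might decode the OPP, for instance via
 * dev_pm_opp_get_level(), into a provider specific performance state.
 */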

static int __init genpd_bus_init(void)
{
	return bus_register(&genpd_bus_type);
}
core_initcall(genpd_bus_init);

#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */


/***        debugfs support        ***/

#ifdef CONFIG_DEBUG_FS
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/kobject.h>
static struct dentry *genpd_debugfs_dir;

/*
 * TODO: This function is a slightly modified version of rtpm_status_show
 * from sysfs.c, so generalize it.
 */
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
	static const char * const status_lookup[] = {
		[RPM_ACTIVE] = "active",
		[RPM_RESUMING] = "resuming",
		[RPM_SUSPENDED] = "suspended",
		[RPM_SUSPENDING] = "suspending"
	};
	const char *p = "";

	if (dev->power.runtime_error)
		p = "error";
	else if (dev->power.disable_depth)
		p = "unsupported";
	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
		p = status_lookup[dev->power.runtime_status];
	else
		WARN_ON(1);

	seq_puts(s, p);
}

static int genpd_summary_one(struct seq_file *s,
			     struct generic_pm_domain *genpd)
{
	static const char * const status_lookup[] = {
		[GPD_STATE_ACTIVE] = "on",
		[GPD_STATE_POWER_OFF] = "off"
	};
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	struct gpd_link *link;
	char state[16];
	int ret;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;
	if (!genpd_status_on(genpd))
		snprintf(state, sizeof(state), "%s-%u",
			 status_lookup[genpd->status], genpd->state_idx);
	else
		snprintf(state, sizeof(state), "%s",
			 status_lookup[genpd->status]);
	seq_printf(s, "%-30s  %-15s ", genpd->name, state);

	/*
	 * Modifications on the list require holding locks on both
	 * parent and child, so we are safe.
	 * Also genpd->name is immutable.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		seq_printf(s, "%s", link->child->name);
		if (!list_is_last(&link->parent_node, &genpd->parent_links))
			seq_puts(s, ", ");
	}
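
	/* List the devices in the domain, together with their runtime PM status. */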
	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj,
				genpd_is_irq_safe(genpd) ?
				GFP_ATOMIC : GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "\n    %-50s  ", kobj_path);
		rtpm_status_str(s, pm_data->dev);
		kfree(kobj_path);
	}

	seq_puts(s, "\n");
exit:
	genpd_unlock(genpd);

	return 0;
}

static int summary_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	seq_puts(s, "domain                          status          children\n");
	seq_puts(s, "    /device                                             runtime status\n");
	seq_puts(s, "----------------------------------------------------------------------\n");

	ret = mutex_lock_interruptible(&gpd_list_lock);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
		ret = genpd_summary_one(s, genpd);
		if (ret)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return ret;
}
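
/* Backs the top-level pm_genpd_summary file created in genpd_debug_init() below. */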

static int status_show(struct seq_file *s, void *data)
{
	static const char * const status_lookup[] = {
		[GPD_STATE_ACTIVE] = "on",
		[GPD_STATE_POWER_OFF] = "off"
	};

	struct generic_pm_domain *genpd = s->private;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;
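
	/* When the domain is off, also report the index of the idle state it is in. */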
	if (genpd->status == GPD_STATE_POWER_OFF)
		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
			   genpd->state_idx);
	else
		seq_printf(s, "%s\n", status_lookup[genpd->status]);
exit:
	genpd_unlock(genpd);
	return ret;
}

static int sub_domains_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	struct gpd_link *link;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(link, &genpd->parent_links, parent_node)
		seq_printf(s, "%s\n", link->child->name);

	genpd_unlock(genpd);
	return ret;
}

static int idle_states_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	unsigned int i;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	seq_puts(s, "State          Time Spent(ms)\n");

	for (i = 0; i < genpd->state_count; i++) {
		ktime_t delta = 0;
		s64 msecs;
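
		/* Include the still-accruing residency of the currently entered state. */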
		if ((genpd->status == GPD_STATE_POWER_OFF) &&
				(genpd->state_idx == i))
			delta = ktime_sub(ktime_get(), genpd->accounting_time);

		msecs = ktime_to_ms(
			ktime_add(genpd->states[i].idle_time, delta));
		seq_printf(s, "S%-13i %lld\n", i, msecs);
	}

	genpd_unlock(genpd);
	return ret;
}

static int active_time_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	ktime_t delta = 0;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;
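
	/* If the domain is still on, include the time since it was powered on. */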
	if (genpd->status == GPD_STATE_ACTIVE)
		delta = ktime_sub(ktime_get(), genpd->accounting_time);

	seq_printf(s, "%lld ms\n", ktime_to_ms(
				ktime_add(genpd->on_time, delta)));

	genpd_unlock(genpd);
	return ret;
}

static int total_idle_time_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	ktime_t delta = 0, total = 0;
	unsigned int i;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	for (i = 0; i < genpd->state_count; i++) {
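
		/* Count the ongoing residency of the state the domain is currently in. */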
		if ((genpd->status == GPD_STATE_POWER_OFF) &&
				(genpd->state_idx == i))
			delta = ktime_sub(ktime_get(), genpd->accounting_time);

		total = ktime_add(total, genpd->states[i].idle_time);
	}
	total = ktime_add(total, delta);

	seq_printf(s, "%lld ms\n", ktime_to_ms(total));

	genpd_unlock(genpd);
	return ret;
}

static int devices_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj,
				genpd_is_irq_safe(genpd) ?
				GFP_ATOMIC : GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "%s\n", kobj_path);
		kfree(kobj_path);
	}

	genpd_unlock(genpd);
	return ret;
}

static int perf_state_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;

	if (genpd_lock_interruptible(genpd))
		return -ERESTARTSYS;

	seq_printf(s, "%u\n", genpd->performance_state);

	genpd_unlock(genpd);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(summary);
DEFINE_SHOW_ATTRIBUTE(status);
DEFINE_SHOW_ATTRIBUTE(sub_domains);
DEFINE_SHOW_ATTRIBUTE(idle_states);
DEFINE_SHOW_ATTRIBUTE(active_time);
DEFINE_SHOW_ATTRIBUTE(total_idle_time);
DEFINE_SHOW_ATTRIBUTE(devices);
DEFINE_SHOW_ATTRIBUTE(perf_state);
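
/*
 * DEFINE_SHOW_ATTRIBUTE(foo) generates a foo_open() that wraps single_open()
 * around foo_show(), plus the matching foo_fops used below; the debugfs
 * inode's private data (the genpd) is passed through to seq_file::private.
 */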

static int __init genpd_debug_init(void)
{
	struct dentry *d;
	struct generic_pm_domain *genpd;

	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

	debugfs_create_file("pm_genpd_summary", 0444, genpd_debugfs_dir,
			    NULL, &summary_fops);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
		d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);

		debugfs_create_file("current_state", 0444,
				d, genpd, &status_fops);
		debugfs_create_file("sub_domains", 0444,
				d, genpd, &sub_domains_fops);
		debugfs_create_file("idle_states", 0444,
				d, genpd, &idle_states_fops);
		debugfs_create_file("active_time", 0444,
				d, genpd, &active_time_fops);
		debugfs_create_file("total_idle_time", 0444,
				d, genpd, &total_idle_time_fops);
		debugfs_create_file("devices", 0444,
				d, genpd, &devices_fops);
		if (genpd->set_performance_state)
			debugfs_create_file("perf_state", 0444,
					    d, genpd, &perf_state_fops);
	}

	return 0;
}
late_initcall(genpd_debug_init);
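
/*
 * Note: only domains already on gpd_list when this late_initcall runs get a
 * debugfs directory; domains registered afterwards are not added here.
 */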

static void __exit genpd_debug_exit(void)
{
	debugfs_remove_recursive(genpd_debugfs_dir);
}
__exitcall(genpd_debug_exit);
#endif /* CONFIG_DEBUG_FS */