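// SPDX-License-Identifier: GPL-2.0
/*
 * Common code related to device power domains (genpd).
 */
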
#define pr_fmt(fmt) "PM: " fmt

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#include "power.h"

#define GENPD_RETRY_MAX_MS	250

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine)						\
		__ret = __routine(dev);				\
	__ret;							\
})

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};

static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}

static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
					int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}

static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}

static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
{
	mutex_unlock(&genpd->mlock);
}

static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};

static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}

static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
					int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}

static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}

static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}

static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};

#define genpd_lock(p)			p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			p->lock_ops->unlock(p)

#define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)

static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
		const struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/*
	 * Warn once if an IRQ safe device is attached to a domain which may
	 * sleep. Such a domain can never be powered off while the device is
	 * active, unless the domain is marked as always on.
	 */
	if (ret && !genpd_is_always_on(genpd))
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
				genpd->name);

	return ret;
}

static int genpd_runtime_suspend(struct device *dev);

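/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */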
static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	/* A genpd's always have its ->runtime_suspend() callback assigned. */
	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
		return pd_to_genpd(dev->pm_domain);

	return NULL;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(const struct generic_pm_domain *genpd,
			  struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(const struct generic_pm_domain *genpd,
			   struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *genpd_debugfs_dir;

static void genpd_debug_add(struct generic_pm_domain *genpd);

static void genpd_debug_remove(struct generic_pm_domain *genpd)
{
	struct dentry *d;

	d = debugfs_lookup(genpd->name, genpd_debugfs_dir);
	debugfs_remove(d);
}

static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
	ktime_t delta, now;

	now = ktime_get();
	delta = ktime_sub(now, genpd->accounting_time);

	/*
	 * If genpd->status is active, it means we are just
	 * out of off and so update the idle time and vice
	 * versa.
	 */
	if (genpd->status == GENPD_STATE_ON) {
		int state_idx = genpd->state_idx;

		genpd->states[state_idx].idle_time =
			ktime_add(genpd->states[state_idx].idle_time, delta);
	} else {
		genpd->on_time = ktime_add(genpd->on_time, delta);
	}

	genpd->accounting_time = now;
}
#else
static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif

static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
					   unsigned int state)
{
	struct generic_pm_domain_data *pd_data;
	struct pm_domain_data *pdd;
	struct gpd_link *link;

	/* New requested state is same as Max requested state */
	if (state == genpd->performance_state)
		return state;

	/* New requested state is higher than Max requested state */
	if (state > genpd->performance_state)
		return state;

	/* Traverse all devices within the domain */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		pd_data = to_gpd_data(pdd);

		if (pd_data->performance_state > state)
			state = pd_data->performance_state;
	}

	/*
	 * Traverse all sub-domains within the domain. This can be
	 * done without any additional locking as the link->performance_state
	 * field is protected by the parent genpd->lock, which is already taken.
	 *
	 * Also note that link->performance_state (subdomain's performance state
	 * requirement to parent domain) is different from
	 * link->child->performance_state (current performance state requirement
	 * of the devices/sub-domains of the subdomain) and so can have a
	 * different value.
	 *
	 * Note that we also take vote from powered-off sub-domains into account
	 * as the same is done for devices right now.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (link->performance_state > state)
			state = link->performance_state;
	}

	return state;
}

static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
					 struct generic_pm_domain *parent,
					 unsigned int pstate)
{
	if (!parent->set_performance_state)
		return pstate;

	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
						  parent->opp_table,
						  pstate);
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth)
{
	struct generic_pm_domain *parent;
	struct gpd_link *link;
	int parent_state, ret;

	if (state == genpd->performance_state)
		return 0;

	/* Propagate to parents of genpd */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		parent = link->parent;

		/* Find parent's performance state */
		ret = genpd_xlate_performance_state(genpd, parent, state);
		if (unlikely(ret < 0))
			goto err;

		parent_state = ret;

		genpd_lock_nested(parent, depth + 1);

		link->prev_performance_state = link->performance_state;
		link->performance_state = parent_state;
		parent_state = _genpd_reeval_performance_state(parent,
						parent_state);
		ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
		if (ret)
			link->performance_state = link->prev_performance_state;

		genpd_unlock(parent);

		if (ret)
			goto err;
	}

	if (genpd->set_performance_state) {
		ret = genpd->set_performance_state(genpd, state);
		if (ret)
			goto err;
	}

	genpd->performance_state = state;
	return 0;

err:
	/* Encountered an error, lets rollback */
	list_for_each_entry_continue_reverse(link, &genpd->child_links,
					     child_node) {
		parent = link->parent;

		genpd_lock_nested(parent, depth + 1);

		parent_state = link->prev_performance_state;
		link->performance_state = parent_state;

		parent_state = _genpd_reeval_performance_state(parent,
						parent_state);
		if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
			pr_err("%s: Failed to roll back to %d performance state\n",
			       parent->name, parent_state);
		}

		genpd_unlock(parent);
	}

	return ret;
}

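/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 * domain.
 *
 * @dev: Device for which the performance-state needs to be set.
 * @state: Target performance state of the device. This can be set as 0 when
 *	   the device doesn't have any performance state constraints left
 *	   (such as when the device is being put into idle).
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */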
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	unsigned int prev;
	int ret;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	genpd_lock(genpd);

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	prev = gpd_data->performance_state;
	gpd_data->performance_state = state;

	state = _genpd_reeval_performance_state(genpd, state);
	ret = _genpd_set_performance_state(genpd, state, 0);
	if (ret)
		gpd_data->performance_state = prev;

	genpd_unlock(genpd);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);

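/**
 * dev_pm_genpd_set_next_wakeup - Notify the genpd of an impending wakeup.
 * @dev: Device to handle
 * @next: impending interrupt/wakeup for the device
 *
 * Allow devices to inform of the next wakeup. The information is stored in
 * the device's domain data and may be used by the genpd governor.
 */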
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
{
	struct generic_pm_domain_data *gpd_data;
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	gpd_data->next_wakeup = next;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);

static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret, nr_calls = 0;

	/* Notify consumers that we are about to power on. */
	ret = __raw_notifier_call_chain(&genpd->power_notifiers,
					GENPD_NOTIFY_PRE_ON, NULL, -1,
					&nr_calls);
	ret = notifier_to_errno(ret);
	if (ret)
		goto err;

	if (!genpd->power_on)
		goto out;

	if (!timed) {
		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		goto err;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		goto out;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	return 0;
err:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return ret;
}

static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret, nr_calls = 0;

	/* Notify consumers that we are about to power off. */
	ret = __raw_notifier_call_chain(&genpd->power_notifiers,
					GENPD_NOTIFY_PRE_OFF, NULL, -1,
					&nr_calls);
	ret = notifier_to_errno(ret);
	if (ret)
		goto busy;

	if (!genpd->power_off)
		goto out;

	if (!timed) {
		ret = genpd->power_off(genpd);
		if (ret)
			goto busy;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret)
		goto busy;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		goto out;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return 0;
busy:
	if (nr_calls)
		__raw_notifier_call_chain(&genpd->power_notifiers,
					  GENPD_NOTIFY_ON, NULL, nr_calls - 1,
					  NULL);
	return ret;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

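/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback,
 * the RPM status of the related device is in an intermediate state, not yet
 * turned into RPM_SUSPENDED. This means genpd_power_off() must allow one
 * device to not be suspended when powering down @genpd.
 * @depth: nesting count for lockdep.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */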
static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			   unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;
	int ret;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
		return 0;

	/*
	 * Abort power off for the PM domain in the following situations:
	 * (1) The domain is configured as always on.
	 * (2) When the domain has a subdomain being powered on.
	 */
	if (genpd_is_always_on(genpd) ||
	    genpd_is_rpm_always_on(genpd) ||
	    atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		/*
		 * Do not allow the PM domain to be powered off, when an IRQ
		 * safe device is part of a non-IRQ safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	/* Default to shallowest state. */
	if (!genpd->gov)
		genpd->state_idx = 0;

	/* Don't power off, if a child domain is waiting to power on. */
	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	ret = _genpd_power_off(genpd, true);
	if (ret) {
		genpd->states[genpd->state_idx].rejected++;
		return ret;
	}

	genpd->status = GENPD_STATE_OFF;
	genpd_update_accounting(genpd);
	genpd->states[genpd->state_idx].usage++;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return 0;
}

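/**
 * genpd_power_on - Restore power to a given PM domain and its parents.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its parents so that it is possible to
 * resume a device belonging to it.
 */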
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd_status_on(genpd))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the parents' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_sd_counter_inc(parent);

		genpd_lock_nested(parent, depth + 1);
		ret = genpd_power_on(parent, depth + 1);
		genpd_unlock(parent);

		if (ret) {
			genpd_sd_counter_dec(parent);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GENPD_STATE_ON;
	genpd_update_accounting(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link,
					&genpd->child_links,
					child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return ret;
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_lock(genpd);
	genpd_power_off(genpd, false, 0);
	genpd_unlock(genpd);
}

/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

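/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */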
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	time_start = 0;
	if (runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine may be run with
	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
	 */
	if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
		return 0;

	genpd_lock(genpd);
	genpd_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

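/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */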
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;
	bool timed = true;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * As we don't power off a non IRQ safe domain, which holds
	 * an IRQ safe device, we don't need to restore power to it.
	 */
	if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
		timed = false;
		goto out;
	}

	genpd_lock(genpd);
	ret = genpd_power_on(genpd, 0);
	genpd_unlock(genpd);

	if (ret)
		return ret;

 out:
	/* Measure resume latency. */
	time_start = 0;
	if (timed && runtime_pm)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

err_stop:
	genpd_stop_dev(genpd, dev);
err_poweroff:
	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
		genpd_lock(genpd);
		genpd_power_off(genpd, true, 0);
		genpd_unlock(genpd);
	}

	return ret;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);

/**
 * genpd_power_off_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_power_off_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall(genpd_power_off_unused);

#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)

static bool genpd_present(const struct generic_pm_domain *genpd)
{
	const struct generic_pm_domain *gpd;

	if (IS_ERR_OR_NULL(genpd))
		return false;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;

	return false;
}

#endif

#ifdef CONFIG_PM_SLEEP

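/**
 * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
 * @genpd: PM domain to power off, if possible.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its parents.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */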
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
				 unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	if (_genpd_power_off(genpd, false))
		return;

	genpd->status = GENPD_STATE_OFF;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_off(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}
}

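/**
 * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
 * @genpd: PM domain to power on.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */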
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
				unsigned int depth)
{
	struct gpd_link *link;

	if (genpd_status_on(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_inc(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_on(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}

	_genpd_power_on(genpd, false);
	genpd->status = GENPD_STATE_ON;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * A device that can wake up the system from sleep states should be resumed
 * by genpd_prepare() if (1) it is enabled to wake up the system and has to
 * remain active for this purpose, or (2) it is not enabled to wake up the
 * system but may need its wakeup settings reconfigured during system suspend,
 * because runtime PM may have set it up to signal remote wakeup instead.
 */
static bool resume_needed(struct device *dev,
			  const struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_is_active_wakeup(genpd);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

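/**
 * genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */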
static int genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_lock(genpd);

	if (genpd->prepared_count++ == 0)
		genpd->suspended_count = 0;

	genpd_unlock(genpd);

	ret = pm_generic_prepare(dev);
	if (ret < 0) {
		genpd_lock(genpd);

		genpd->prepared_count--;

		genpd_unlock(genpd);
	}

	/* Never return 1, as genpd don't cope with the direct_complete path. */
	return ret >= 0 ? 0 : ret;
}

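/**
 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
 *   I/O pm domain.
 * @dev: Device to suspend.
 * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */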
static int genpd_finish_suspend(struct device *dev, bool poweroff)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (poweroff)
		ret = pm_generic_poweroff_noirq(dev);
	else
		ret = pm_generic_suspend_noirq(dev);
	if (ret)
		return ret;

	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
		return 0;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_stop_dev(genpd, dev);
		if (ret) {
			if (poweroff)
				pm_generic_restore_noirq(dev);
			else
				pm_generic_resume_noirq(dev);
			return ret;
		}
	}

	genpd_lock(genpd);
	genpd->suspended_count++;
	genpd_sync_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_suspend_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, false);
}

/**
 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
		return pm_generic_resume_noirq(dev);

	genpd_lock(genpd);
	genpd_sync_power_on(genpd, true, 0);
	genpd->suspended_count--;
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_resume_noirq(dev);
}

/**
 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int genpd_freeze_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	ret = pm_generic_freeze_noirq(dev);
	if (ret)
		return ret;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev))
		ret = genpd_stop_dev(genpd, dev);

	return ret;
}

/**
 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int genpd_thaw_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_thaw_noirq(dev);
}

/**
 * genpd_poweroff_noirq - Completion of hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to poweroff.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_poweroff_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, true);
}

/**
 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	genpd_lock(genpd);
	if (genpd->suspended_count++ == 0) {
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to genpd_sync_power_on(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GENPD_STATE_OFF;
	}

	genpd_sync_power_on(genpd, true, 0);
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_restore_noirq(dev);
}

/**
 * genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	pm_generic_complete(dev);

	genpd_lock(genpd);

	genpd->prepared_count--;
	if (!genpd->prepared_count)
		genpd_queue_power_off_work(genpd);

	genpd_unlock(genpd);
}

static void genpd_switch_state(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;
	bool use_lock;

	genpd = dev_to_genpd(dev);
	if (!genpd_present(genpd))
		return;

	use_lock = genpd_is_irq_safe(genpd);

	if (use_lock)
		genpd_lock(genpd);

	if (suspend) {
		genpd->suspended_count++;
		genpd_sync_power_off(genpd, use_lock, 0);
	} else {
		genpd_sync_power_on(genpd, use_lock, 0);
		genpd->suspended_count--;
	}

	if (use_lock)
		genpd_unlock(genpd);
}

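/**
 * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
 * @dev: The device that is attached to the genpd, that can be suspended.
 *
 * This routine should typically be called for a device that needs to be
 * suspended during the syscore suspend phase. It may also be called during
 * suspend-to-idle to suspend a corresponding CPU device that is attached to a
 * genpd.
 */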
void dev_pm_genpd_suspend(struct device *dev)
{
	genpd_switch_state(dev, true);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);

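/**
 * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
 * @dev: The device that is attached to the genpd, which needs to be resumed.
 *
 * This routine should typically be called for a device that needs to be
 * resumed during the syscore resume phase. It may also be called during
 * suspend-to-idle to resume a corresponding CPU device that is attached to a
 * genpd.
 */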
void dev_pm_genpd_resume(struct device *dev)
{
	genpd_switch_state(dev, false);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);

#else /* !CONFIG_PM_SLEEP */

#define genpd_prepare		NULL
#define genpd_suspend_noirq	NULL
#define genpd_resume_noirq	NULL
#define genpd_freeze_noirq	NULL
#define genpd_thaw_noirq	NULL
#define genpd_poweroff_noirq	NULL
#define genpd_restore_noirq	NULL
#define genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	gpd_data->base.dev = dev;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
	gpd_data->next_wakeup = KTIME_MAX;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		ret = -EINVAL;
		goto err_free;
	}

	dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	return gpd_data;

 err_free:
	spin_unlock_irq(&dev->power.lock);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

static void genpd_update_cpumask(struct generic_pm_domain *genpd,
				 int cpu, bool set, unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_is_cpu_domain(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_lock_nested(parent, depth + 1);
		genpd_update_cpumask(parent, cpu, set, depth + 1);
		genpd_unlock(parent);
	}

	if (set)
		cpumask_set_cpu(cpu, genpd->cpus);
	else
		cpumask_clear_cpu(cpu, genpd->cpus);
}

static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, true, 0);
}

static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, false, 0);
}

static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
{
	int cpu;

	if (!genpd_is_cpu_domain(genpd))
		return -1;

	for_each_possible_cpu(cpu) {
		if (get_cpu_device(cpu) == dev)
			return cpu;
	}

	return -1;
}

static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct device *base_dev)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd_lock(genpd);

	genpd_set_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, &genpd->domain);

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

	genpd_unlock(genpd);
 out:
	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
					DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

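/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 */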
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_device(genpd, dev, dev);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_device);

static int genpd_remove_device(struct generic_pm_domain *genpd,
			       struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
				   DEV_PM_QOS_RESUME_LATENCY);

	genpd_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	genpd_clear_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, NULL);

	list_del_init(&pdd->list_node);

	genpd_unlock(genpd);

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_unlock(genpd);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

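/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @dev: Device to be removed.
 */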
int pm_genpd_remove_device(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);

	if (!genpd)
		return -EINVAL;

	return genpd_remove_device(genpd, dev);
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);

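/**
 * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
 *
 * @dev: Device that is associated with the notifier
 * @nb: The notifier block to register
 *
 * Users may call this function to add a genpd power on/off notifier for an
 * attached @dev. Only one notifier per device is allowed. The notifier is
 * sent when genpd is powered on/off.
 *
 * It is assumed that the user guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */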
int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (gpd_data->power_nb)
		return -EEXIST;

	genpd_lock(genpd);
	ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
	genpd_unlock(genpd);

	if (ret) {
		dev_warn(dev, "failed to add notifier for PM domain %s\n",
			 genpd->name);
		return ret;
	}

	gpd_data->power_nb = nb;
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);

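/**
 * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
 *
 * @dev: Device that is associated with the notifier
 *
 * Users may call this function to remove a genpd power on/off notifier for an
 * attached @dev.
 *
 * It is assumed that the user guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */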
int dev_pm_genpd_remove_notifier(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	if (!gpd_data->power_nb)
		return -ENODEV;

	genpd_lock(genpd);
	ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
					    gpd_data->power_nb);
	genpd_unlock(genpd);

	if (ret) {
		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
			 genpd->name);
		return ret;
	}

	gpd_data->power_nb = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);

static int genpd_add_subdomain(struct generic_pm_domain *genpd,
			       struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	/*
	 * If the domain can be powered on/off in an IRQ safe
	 * context, ensure that the subdomain can also be
	 * powered on/off in that context.
	 */
	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
		     genpd->name, subdomain->name);
		return -EINVAL;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
		if (itr->child == subdomain && itr->parent == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->parent = genpd;
	list_add_tail(&link->parent_node, &genpd->parent_links);
	link->child = subdomain;
	list_add_tail(&link->child_node, &subdomain->child_links);
	if (genpd_status_on(subdomain))
		genpd_sd_counter_inc(genpd);

 out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);
	if (ret)
		kfree(link);
	return ret;
}

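/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Leader PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */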
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_subdomain(genpd, subdomain);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);

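/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Leader PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */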
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *l, *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
		pr_warn("%s: unable to remove subdomain %s\n",
			genpd->name, subdomain->name);
		ret = -EBUSY;
		goto out;
	}

	list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
		if (link->child != subdomain)
			continue;

		list_del(&link->parent_node);
		list_del(&link->child_node);
		kfree(link);
		if (genpd_status_on(subdomain))
			genpd_sd_counter_dec(genpd);

		ret = 0;
		break;
	}

out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);

static void genpd_free_default_power_state(struct genpd_power_state *states,
					   unsigned int state_count)
{
	kfree(states);
}

static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
{
	struct genpd_power_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	genpd->states = state;
	genpd->state_count = 1;
	genpd->free_states = genpd_free_default_power_state;

	return 0;
}

static void genpd_lock_init(struct generic_pm_domain *genpd)
{
	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
		spin_lock_init(&genpd->slock);
		genpd->lock_ops = &genpd_spin_ops;
	} else {
		mutex_init(&genpd->mlock);
		genpd->lock_ops = &genpd_mtx_ops;
	}
}

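/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's power_is_off field.
 *
 * Returns 0 on successful initialization, else a negative error code.
 */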
int pm_genpd_init(struct generic_pm_domain *genpd,
		  struct dev_power_governor *gov, bool is_off)
{
	int ret;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	INIT_LIST_HEAD(&genpd->parent_links);
	INIT_LIST_HEAD(&genpd->child_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
	genpd_lock_init(genpd);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->provider = NULL;
	genpd->has_provider = false;
	genpd->accounting_time = ktime_get();
	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
	genpd->domain.ops.prepare = genpd_prepare;
	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
	genpd->domain.ops.complete = genpd_complete;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	/* Always-on domains must be powered on at initialization. */
	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
	    !genpd_status_on(genpd))
		return -EINVAL;

	if (genpd_is_cpu_domain(genpd) &&
	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
		return -ENOMEM;

	/* Use only one "off" state if there were no states declared */
	if (genpd->state_count == 0) {
		ret = genpd_set_default_power_state(genpd);
		if (ret) {
			if (genpd_is_cpu_domain(genpd))
				free_cpumask_var(genpd->cpus);
			return ret;
		}
	} else if (!gov && genpd->state_count > 1) {
		pr_warn("%s: no governor for states\n", genpd->name);
	}

	device_initialize(&genpd->dev);
	dev_set_name(&genpd->dev, "%s", genpd->name);

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	genpd_debug_add(genpd);
	mutex_unlock(&gpd_list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_genpd_init);

static int genpd_remove(struct generic_pm_domain *genpd)
{
	struct gpd_link *l, *link;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_lock(genpd);

	if (genpd->has_provider) {
		genpd_unlock(genpd);
		pr_err("Provider present, unable to remove %s\n", genpd->name);
		return -EBUSY;
	}

	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
		genpd_unlock(genpd);
		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
		return -EBUSY;
	}

	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
		list_del(&link->parent_node);
		list_del(&link->child_node);
		kfree(link);
	}

	genpd_debug_remove(genpd);
	list_del(&genpd->gpd_list_node);
	genpd_unlock(genpd);
	cancel_work_sync(&genpd->power_off_work);
	if (genpd_is_cpu_domain(genpd))
		free_cpumask_var(genpd->cpus);
	if (genpd->free_states)
		genpd->free_states(genpd->states, genpd->state_count);

	pr_debug("%s: removed %s\n", __func__, genpd->name);

	return 0;
}

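/**
 * pm_genpd_remove - Remove a generic I/O PM domain
 * @genpd: Pointer to PM domain that is to be removed.
 *
 * To remove the PM domain, this function:
 *  - Removes the PM domain as a subdomain to any parent domains,
 *    if it was added.
 *  - Removes the PM domain from the list of registered PM domains.
 *
 * The PM domain will only be removed, if the associated provider has
 * been removed, it is not a parent to any other PM domain and has no
 * devices associated with it.
 */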
int pm_genpd_remove(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_remove(genpd);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove);

#ifdef CONFIG_PM_GENERIC_DOMAINS_OF

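/*
 * Device Tree based PM domain providers.
 *
 * The code below implements generic device tree based PM domain providers that
 * bind device tree nodes with generic PM domains registered in the system.
 *
 * Any driver that registers generic PM domains and needs to support binding of
 * devices to these domains is supposed to register a PM domain provider, which
 * maps a PM domain specifier retrieved from the device tree to a PM domain.
 *
 * Two simple mapping functions have been provided for convenience:
 *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
 *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
 *    index.
 */

/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *         into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 */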
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};

/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);
/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);

/**
 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of the xlate function
 * needs to be a valid pointer to struct generic_pm_domain.
 */
static struct generic_pm_domain *genpd_xlate_simple(
					struct of_phandle_args *genpdspec,
					void *data)
{
	return data;
}

/**
 * genpd_xlate_onecell() - Xlate function using a single index.
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct genpd_onecell_data
 *
 * This is a generic xlate function that can be used to model simple PM domain
 * controllers that have one device tree node and provide multiple PM domains.
 * A single cell is used as an index into an array of PM domains specified in
 * the genpd_onecell_data struct when registering the provider.
 */
static struct generic_pm_domain *genpd_xlate_onecell(
					struct of_phandle_args *genpdspec,
					void *data)
{
	struct genpd_onecell_data *genpd_data = data;
	unsigned int idx = genpdspec->args[0];

	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	if (idx >= genpd_data->num_domains) {
		pr_err("%s: invalid domain index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	if (!genpd_data->domains[idx])
		return ERR_PTR(-ENOENT);

	return genpd_data->domains[idx];
}

/**
 * genpd_add_provider() - Register a PM domain provider for a node
 * @np: Device node pointer associated with the PM domain provider.
 * @xlate: Callback for decoding PM domain from phandle arguments.
 * @data: Context pointer for @xlate callback.
 */
static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
			      void *data)
{
	struct of_genpd_provider *cp;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->xlate = xlate;
	fwnode_dev_initialized(&np->fwnode, true);

	mutex_lock(&of_genpd_mutex);
	list_add(&cp->link, &of_genpd_providers);
	mutex_unlock(&of_genpd_mutex);
	pr_debug("Added domain provider from %pOF\n", np);

	return 0;
}

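/**
 * of_genpd_add_provider_simple() - Register a simple PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @genpd: Pointer to PM domain associated with the PM domain provider.
 */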
int of_genpd_add_provider_simple(struct device_node *np,
				 struct generic_pm_domain *genpd)
{
	int ret = -EINVAL;

	if (!np || !genpd)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	if (!genpd_present(genpd))
		goto unlock;

	genpd->dev.of_node = np;

	/* Parse genpd OPP table */
	if (genpd->set_performance_state) {
		ret = dev_pm_opp_of_add_table(&genpd->dev);
		if (ret) {
			dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
				ret);
			goto unlock;
		}

		/*
		 * Save table for faster processing while setting performance
		 * state.
		 */
		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
		WARN_ON(!genpd->opp_table);
	}

	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
	if (ret) {
		if (genpd->set_performance_state) {
			dev_pm_opp_put_opp_table(genpd->opp_table);
			dev_pm_opp_of_remove_table(&genpd->dev);
		}

		goto unlock;
	}

	genpd->provider = &np->fwnode;
	genpd->has_provider = true;

unlock:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);

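/**
 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @data: Pointer to the data associated with the PM domain provider.
 */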
int of_genpd_add_provider_onecell(struct device_node *np,
				  struct genpd_onecell_data *data)
{
	struct generic_pm_domain *genpd;
	unsigned int i;
	int ret = -EINVAL;

	if (!np || !data)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	if (!data->xlate)
		data->xlate = genpd_xlate_onecell;

	for (i = 0; i < data->num_domains; i++) {
		genpd = data->domains[i];

		if (!genpd)
			continue;
		if (!genpd_present(genpd))
			goto error;

		genpd->dev.of_node = np;

		/* Parse genpd OPP table */
		if (genpd->set_performance_state) {
			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
			if (ret) {
				dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
					i, ret);
				goto error;
			}

			/*
			 * Save table for faster processing while setting
			 * performance state.
			 */
			genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
			WARN_ON(!genpd->opp_table);
		}

		genpd->provider = &np->fwnode;
		genpd->has_provider = true;
	}

	ret = genpd_add_provider(np, data->xlate, data);
	if (ret < 0)
		goto error;

	mutex_unlock(&gpd_list_lock);

	return 0;

error:
	while (i--) {
		genpd = data->domains[i];

		if (!genpd)
			continue;

		genpd->provider = NULL;
		genpd->has_provider = false;

		if (genpd->set_performance_state) {
			dev_pm_opp_put_opp_table(genpd->opp_table);
			dev_pm_opp_of_remove_table(&genpd->dev);
		}
	}

	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);

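/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 */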
void of_genpd_del_provider(struct device_node *np)
{
	struct of_genpd_provider *cp, *tmp;
	struct generic_pm_domain *gpd;

	mutex_lock(&gpd_list_lock);
	mutex_lock(&of_genpd_mutex);
	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
		if (cp->node == np) {
			/*
			 * For each PM domain associated with the
			 * provider, set the 'has_provider' to false
			 * so that the PM domain can be safely removed.
			 */
			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
				if (gpd->provider == &np->fwnode) {
					gpd->has_provider = false;

					if (!gpd->set_performance_state)
						continue;

					dev_pm_opp_put_opp_table(gpd->opp_table);
					dev_pm_opp_of_remove_table(&gpd->dev);
				}
			}

			fwnode_dev_initialized(&cp->node->fwnode, false);
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_genpd_mutex);
	mutex_unlock(&gpd_list_lock);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);

/**
 * genpd_get_from_provider() - Look-up PM domain
 * @genpdspec: OF phandle args to use for look-up
 *
 * Looks for a PM domain provider under the node specified by @genpdspec and
 * if found, uses xlate function of the provider to map phandle args to a PM
 * domain.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
 * on failure.
 */
static struct generic_pm_domain *genpd_get_from_provider(
					struct of_phandle_args *genpdspec)
{
	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
	struct of_genpd_provider *provider;

	if (!genpdspec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&of_genpd_mutex);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_genpd_providers, link) {
		if (provider->node == genpdspec->np)
			genpd = provider->xlate(genpdspec, provider->data);
		if (!IS_ERR(genpd))
			break;
	}

	mutex_unlock(&of_genpd_mutex);

	return genpd;
}

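/**
 * of_genpd_add_device() - Add a device to an I/O PM domain
 * @genpdspec: OF phandle args to use for look-up PM domain
 * @dev: Device to be added.
 *
 * Looks-up an I/O PM domain based upon phandle args provided and adds
 * the device to the PM domain. Returns a negative error code on failure.
 */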
int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	mutex_lock(&gpd_list_lock);

	genpd = genpd_get_from_provider(genpdspec);
	if (IS_ERR(genpd)) {
		ret = PTR_ERR(genpd);
		goto out;
	}

	ret = genpd_add_device(genpd, dev, dev);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_device);

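/**
 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @parent_spec: OF phandle args to use for parent PM domain look-up
 * @subdomain_spec: OF phandle args to use for subdomain look-up
 *
 * Looks-up a parent PM domain and subdomain based upon phandle args
 * provided and adds the subdomain to the parent PM domain. Returns a
 * negative error code on failure.
 */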
int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
			   struct of_phandle_args *subdomain_spec)
{
	struct generic_pm_domain *parent, *subdomain;
	int ret;

	mutex_lock(&gpd_list_lock);

	parent = genpd_get_from_provider(parent_spec);
	if (IS_ERR(parent)) {
		ret = PTR_ERR(parent);
		goto out;
	}

	subdomain = genpd_get_from_provider(subdomain_spec);
	if (IS_ERR(subdomain)) {
		ret = PTR_ERR(subdomain);
		goto out;
	}

	ret = genpd_add_subdomain(parent, subdomain);

out:
	mutex_unlock(&gpd_list_lock);

	return ret == -ENOENT ? -EPROBE_DEFER : ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);

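/**
 * of_genpd_remove_last - Remove the last PM domain registered for a provider
 * @np: Pointer to device node associated with provider
 *
 * Find the last PM domain that was added by a particular provider and
 * remove this PM domain from the list of PM domains. The PM domain will only
 * be removed, if the provider associated with the domain has been removed.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or
 * ERR_PTR() on failure.
 */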
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
	int ret;

	if (IS_ERR_OR_NULL(np))
		return ERR_PTR(-EINVAL);

	mutex_lock(&gpd_list_lock);
	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
		if (gpd->provider == &np->fwnode) {
			ret = genpd_remove(gpd);
			genpd = ret ? ERR_PTR(ret) : gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}
EXPORT_SYMBOL_GPL(of_genpd_remove_last);

static void genpd_release_dev(struct device *dev)
{
	of_node_put(dev->of_node);
	kfree(dev);
}

static struct bus_type genpd_bus_type = {
	.name		= "genpd",
};

/**
 * genpd_dev_pm_detach - Detach a device from its PM domain.
 * @dev: Device to detach.
 * @power_off: Currently not used
 *
 * Try to locate a corresponding generic PM domain, which the device was
 * attached to previously. If such is found, the device is detached from it.
 */
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret = 0;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	dev_dbg(dev, "removing from PM domain %s\n", pd->name);

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = genpd_remove_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d",
			pd->name, ret);
		return;
	}

	/* Check if PM domain can be powered off after removing this device. */
	genpd_queue_power_off_work(pd);

	/* Unregister the device if it was created by genpd. */
	if (dev->bus == &genpd_bus_type)
		device_unregister(dev);
}

static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	genpd_queue_power_off_work(pd);
}

static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
				 unsigned int index, bool power_on)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	int ret;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
				"#power-domain-cells", index, &pd_args);
	if (ret < 0)
		return ret;

	mutex_lock(&gpd_list_lock);
	pd = genpd_get_from_provider(&pd_args);
	of_node_put(pd_args.np);
	if (IS_ERR(pd)) {
		mutex_unlock(&gpd_list_lock);
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		return driver_deferred_probe_check_state(base_dev);
	}

	dev_dbg(dev, "adding to PM domain %s\n", pd->name);

	ret = genpd_add_device(pd, dev, base_dev);
	mutex_unlock(&gpd_list_lock);

	if (ret < 0) {
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to add to PM domain %s: %d",
				pd->name, ret);
		return ret;
	}

	dev->pm_domain->detach = genpd_dev_pm_detach;
	dev->pm_domain->sync = genpd_dev_pm_sync;

	if (power_on) {
		genpd_lock(pd);
		ret = genpd_power_on(pd, 0);
		genpd_unlock(pd);
	}

	if (ret)
		genpd_remove_device(pd, dev);

	return ret ? -EPROBE_DEFER : 1;
}

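/**
 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
 * @dev: Device to attach.
 *
 * Parse device's OF node to find a PM domain specifier. If such is found,
 * attaches the device to retrieved pm_domain ops.
 *
 * Returns 1 on successfully attached PM domain, 0 when the device don't need a
 * PM domain or when multiple power-domains exists for it, else a negative error
 * code. Note that if a power-domain exists for the device, but it cannot be
 * found or turned on, then return -EPROBE_DEFER to ensure that the device is
 * not probed and to re-try again later.
 */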
int genpd_dev_pm_attach(struct device *dev)
{
	if (!dev->of_node)
		return 0;

	/*
	 * Devices with multiple PM domains must be attached separately, as we
	 * can only attach one PM domain per device.
	 */
	if (of_count_phandle_with_args(dev->of_node, "power-domains",
				       "#power-domain-cells") != 1)
		return 0;

	return __genpd_dev_pm_attach(dev, dev, 0, true);
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);

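/**
 * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
 * @dev: The device used to lookup the PM domain.
 * @index: The index of the PM domain.
 *
 * Parse device's OF node to find a PM domain specifier at the provided @index.
 * If such is found, creates a virtual device and attaches it to the retrieved
 * pm_domain ops.
 *
 * Returns the created virtual device if successfully attached PM domain, NULL
 * when the device don't need a PM domain, else an ERR_PTR() in case of
 * failures.
 */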
2701struct device *genpd_dev_pm_attach_by_id(struct device *dev,
2702 unsigned int index)
2703{
2704 struct device *virt_dev;
2705 int num_domains;
2706 int ret;
2707
2708 if (!dev->of_node)
2709 return NULL;
2710
2711
2712 num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
2713 "#power-domain-cells");
2714 if (index >= num_domains)
2715 return NULL;
2716
2717
2718 virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
2719 if (!virt_dev)
2720 return ERR_PTR(-ENOMEM);
2721
2722 dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
2723 virt_dev->bus = &genpd_bus_type;
2724 virt_dev->release = genpd_release_dev;
2725 virt_dev->of_node = of_node_get(dev->of_node);
2726
2727 ret = device_register(virt_dev);
2728 if (ret) {
2729 put_device(virt_dev);
2730 return ERR_PTR(ret);
2731 }
2732
2733
2734 ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
2735 if (ret < 1) {
2736 device_unregister(virt_dev);
2737 return ret ? ERR_PTR(ret) : NULL;
2738 }
2739
2740 pm_runtime_enable(virt_dev);
2741 genpd_queue_power_off_work(dev_to_genpd(virt_dev));
2742
2743 return virt_dev;
2744}
2745EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);

/**
 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
 * @dev: The device used to lookup the PM domain.
 * @name: The name of the PM domain.
 *
 * Parse device's OF node to find a PM domain specifier using the
 * power-domain-names DT property. For further description see
 * genpd_dev_pm_attach_by_id().
 */
struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
{
	int index;

	if (!dev->of_node)
		return NULL;

	index = of_property_match_string(dev->of_node, "power-domain-names",
					 name);
	if (index < 0)
		return NULL;

	return genpd_dev_pm_attach_by_id(dev, index);
}
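
/*
 * Example (illustrative): attaching by name assumes a consumer node with
 * multiple power domains and a matching "power-domain-names" list; the
 * names below are hypothetical:
 *
 *	codec@0 {
 *		compatible = "vendor,example-codec";
 *		power-domains = <&pd_mx>, <&pd_cx>;
 *		power-domain-names = "mx", "cx";
 *	};
 *
 * A driver would typically go through the dev_pm_domain_attach_by_name()
 * wrapper and hold on to the returned virtual device (sketch, error
 * handling shortened):
 *
 *	struct device *pd_dev;
 *
 *	pd_dev = dev_pm_domain_attach_by_name(dev, "mx");
 *	if (IS_ERR_OR_NULL(pd_dev))
 *		return pd_dev ? PTR_ERR(pd_dev) : -ENODEV;
 */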

static const struct of_device_id idle_state_match[] = {
	{ .compatible = "domain-idle-state", },
	{ }
};

static int genpd_parse_state(struct genpd_power_state *genpd_state,
			     struct device_node *state_node)
{
	int err;
	u32 residency;
	u32 entry_latency, exit_latency;

	err = of_property_read_u32(state_node, "entry-latency-us",
				   &entry_latency);
	if (err) {
		pr_debug(" * %pOF missing entry-latency-us property\n",
			 state_node);
		return -EINVAL;
	}

	err = of_property_read_u32(state_node, "exit-latency-us",
				   &exit_latency);
	if (err) {
		pr_debug(" * %pOF missing exit-latency-us property\n",
			 state_node);
		return -EINVAL;
	}

	err = of_property_read_u32(state_node, "min-residency-us", &residency);
	if (!err)
		genpd_state->residency_ns = 1000 * residency;

	genpd_state->power_on_latency_ns = 1000 * exit_latency;
	genpd_state->power_off_latency_ns = 1000 * entry_latency;
	genpd_state->fwnode = &state_node->fwnode;

	return 0;
}

static int genpd_iterate_idle_states(struct device_node *dn,
				     struct genpd_power_state *states)
{
	int ret;
	struct of_phandle_iterator it;
	struct device_node *np;
	int i = 0;

	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
	if (ret <= 0)
		return ret;

	/* Loop over the phandles until all the requested entries are found. */
	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
		np = it.node;
		if (!of_match_node(idle_state_match, np))
			continue;
		if (states) {
			ret = genpd_parse_state(&states[i], np);
			if (ret) {
				pr_err("Parsing idle state node %pOF failed with err %d\n",
				       np, ret);
				of_node_put(np);
				return ret;
			}
		}
		i++;
	}

	return i;
}

/**
 * of_genpd_parse_idle_states: Return array of idle states for the genpd.
 *
 * @dn: The genpd device node
 * @states: The pointer to which the state array will be saved.
 * @n: The count of elements in the array returned from this function.
 *
 * Returns the device states parsed from the OF node. The memory for the states
 * is allocated by this function and it is the responsibility of the caller to
 * free the memory after use. If zero compatible domain idle states are found,
 * 0 is returned and *states is set to NULL; in case of errors, a negative
 * error code is returned.
 */
int of_genpd_parse_idle_states(struct device_node *dn,
			       struct genpd_power_state **states, int *n)
{
	struct genpd_power_state *st;
	int ret;

	ret = genpd_iterate_idle_states(dn, NULL);
	if (ret < 0)
		return ret;

	if (!ret) {
		*states = NULL;
		*n = 0;
		return 0;
	}

	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	ret = genpd_iterate_idle_states(dn, st);
	if (ret <= 0) {
		kfree(st);
		return ret < 0 ? ret : -EINVAL;
	}

	*states = st;
	*n = ret;

	return 0;
}
EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
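
/*
 * Example (illustrative): a provider node referencing one compatible idle
 * state, with hypothetical labels and latency numbers:
 *
 *	pd_core: power-controller {
 *		...
 *		domain-idle-states = <&CORE_PD_SLEEP>;
 *	};
 *
 *	CORE_PD_SLEEP: core-pd-sleep {
 *		compatible = "domain-idle-state";
 *		entry-latency-us = <300>;
 *		exit-latency-us = <600>;
 *		min-residency-us = <10000>;
 *	};
 *
 * A genpd provider could then consume the parsed states roughly like this:
 *
 *	struct genpd_power_state *states;
 *	int nr_states, ret;
 *
 *	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *	if (ret)
 *		return ret;
 *	genpd->states = states;
 *	genpd->state_count = nr_states;
 */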

/**
 * pm_genpd_opp_to_performance_state - Get the performance state for an OPP.
 *
 * @genpd_dev: The genpd's device, for which the performance state needs to
 *	be found.
 * @opp: The OPP for which the performance state needs to be found.
 *
 * Invokes the platform specific genpd->opp_to_performance_state() callback to
 * translate a power domain OPP to a performance state.
 *
 * Returns the performance state on success and 0 on failure.
 */
unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
					       struct dev_pm_opp *opp)
{
	struct generic_pm_domain *genpd;
	int state;

	genpd = container_of(genpd_dev, struct generic_pm_domain, dev);

	if (unlikely(!genpd->opp_to_performance_state))
		return 0;

	genpd_lock(genpd);
	state = genpd->opp_to_performance_state(genpd, opp);
	genpd_unlock(genpd);

	return state;
}
EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
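
/*
 * Example (illustrative): a genpd provider enables this path by setting the
 * ->opp_to_performance_state() callback; the callback and structure names
 * below are hypothetical, but dev_pm_opp_get_level() is the usual way to
 * read the "opp-level" value of an OPP:
 *
 *	static unsigned int foo_pd_opp_to_perf_state(struct generic_pm_domain *genpd,
 *						     struct dev_pm_opp *opp)
 *	{
 *		return dev_pm_opp_get_level(opp);
 *	}
 *
 *	...
 *	foo_pd->genpd.opp_to_performance_state = foo_pd_opp_to_perf_state;
 */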

static int __init genpd_bus_init(void)
{
	return bus_register(&genpd_bus_type);
}
core_initcall(genpd_bus_init);

#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */


/***        debugfs support        ***/

#ifdef CONFIG_DEBUG_FS
/*
 * TODO: This function is a slightly modified version of rtpm_status_show
 * from sysfs.c, so generalize it.
 */
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
	static const char * const status_lookup[] = {
		[RPM_ACTIVE] = "active",
		[RPM_RESUMING] = "resuming",
		[RPM_SUSPENDED] = "suspended",
		[RPM_SUSPENDING] = "suspending"
	};
	const char *p = "";

	if (dev->power.runtime_error)
		p = "error";
	else if (dev->power.disable_depth)
		p = "unsupported";
	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
		p = status_lookup[dev->power.runtime_status];
	else
		WARN_ON(1);

	seq_printf(s, "%-25s  ", p);
}

static void perf_status_str(struct seq_file *s, struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	seq_put_decimal_ull(s, "", gpd_data->performance_state);
}

static int genpd_summary_one(struct seq_file *s,
			     struct generic_pm_domain *genpd)
{
	static const char * const status_lookup[] = {
		[GENPD_STATE_ON] = "on",
		[GENPD_STATE_OFF] = "off"
	};
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	struct gpd_link *link;
	char state[16];
	int ret;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;
	if (!genpd_status_on(genpd))
		snprintf(state, sizeof(state), "%s-%u",
			 status_lookup[genpd->status], genpd->state_idx);
	else
		snprintf(state, sizeof(state), "%s",
			 status_lookup[genpd->status]);
	seq_printf(s, "%-30s  %-50s %u", genpd->name, state, genpd->performance_state);

	/*
	 * Modifications on the list require holding locks on both
	 * parent and child, so we are safe.
	 * Also genpd->name is immutable.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (list_is_first(&link->parent_node, &genpd->parent_links))
			seq_printf(s, "\n%48s", " ");
		seq_printf(s, "%s", link->child->name);
		if (!list_is_last(&link->parent_node, &genpd->parent_links))
			seq_puts(s, ", ");
	}

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj,
				genpd_is_irq_safe(genpd) ?
				GFP_ATOMIC : GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "\n    %-50s ", kobj_path);
		rtpm_status_str(s, pm_data->dev);
		perf_status_str(s, pm_data->dev);
		kfree(kobj_path);
	}

	seq_puts(s, "\n");
exit:
	genpd_unlock(genpd);

	return 0;
}

static int summary_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	seq_puts(s, "domain                          status          children                           performance\n");
	seq_puts(s, "    /device                                             runtime status\n");
	seq_puts(s, "----------------------------------------------------------------------------------------------\n");

	ret = mutex_lock_interruptible(&gpd_list_lock);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
		ret = genpd_summary_one(s, genpd);
		if (ret)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return ret;
}

static int status_show(struct seq_file *s, void *data)
{
	static const char * const status_lookup[] = {
		[GENPD_STATE_ON] = "on",
		[GENPD_STATE_OFF] = "off"
	};

	struct generic_pm_domain *genpd = s->private;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;

	if (genpd->status == GENPD_STATE_OFF)
		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
			   genpd->state_idx);
	else
		seq_printf(s, "%s\n", status_lookup[genpd->status]);
exit:
	genpd_unlock(genpd);
	return ret;
}

static int sub_domains_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	struct gpd_link *link;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(link, &genpd->parent_links, parent_node)
		seq_printf(s, "%s\n", link->child->name);

	genpd_unlock(genpd);
	return ret;
}
static int idle_states_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	unsigned int i;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	seq_puts(s, "State          Time Spent(ms) Usage          Rejected\n");

	for (i = 0; i < genpd->state_count; i++) {
		ktime_t delta = 0;
		s64 msecs;

		if ((genpd->status == GENPD_STATE_OFF) &&
		    (genpd->state_idx == i))
			delta = ktime_sub(ktime_get(), genpd->accounting_time);

		msecs = ktime_to_ms(
			ktime_add(genpd->states[i].idle_time, delta));
		seq_printf(s, "S%-13i %-14lld %-14llu %llu\n", i, msecs,
			   genpd->states[i].usage, genpd->states[i].rejected);
	}

	genpd_unlock(genpd);
	return ret;
}

static int active_time_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	ktime_t delta = 0;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	if (genpd->status == GENPD_STATE_ON)
		delta = ktime_sub(ktime_get(), genpd->accounting_time);

	seq_printf(s, "%lld ms\n", ktime_to_ms(
				ktime_add(genpd->on_time, delta)));

	genpd_unlock(genpd);
	return ret;
}

static int total_idle_time_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	ktime_t delta = 0, total = 0;
	unsigned int i;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	for (i = 0; i < genpd->state_count; i++) {
		if ((genpd->status == GENPD_STATE_OFF) &&
		    (genpd->state_idx == i))
			delta = ktime_sub(ktime_get(), genpd->accounting_time);

		total = ktime_add(total, genpd->states[i].idle_time);
	}
	total = ktime_add(total, delta);

	seq_printf(s, "%lld ms\n", ktime_to_ms(total));

	genpd_unlock(genpd);
	return ret;
}

static int devices_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	int ret = 0;

	ret = genpd_lock_interruptible(genpd);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj,
				genpd_is_irq_safe(genpd) ?
				GFP_ATOMIC : GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "%s\n", kobj_path);
		kfree(kobj_path);
	}

	genpd_unlock(genpd);
	return ret;
}

static int perf_state_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd = s->private;

	if (genpd_lock_interruptible(genpd))
		return -ERESTARTSYS;

	seq_printf(s, "%u\n", genpd->performance_state);

	genpd_unlock(genpd);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(summary);
DEFINE_SHOW_ATTRIBUTE(status);
DEFINE_SHOW_ATTRIBUTE(sub_domains);
DEFINE_SHOW_ATTRIBUTE(idle_states);
DEFINE_SHOW_ATTRIBUTE(active_time);
DEFINE_SHOW_ATTRIBUTE(total_idle_time);
DEFINE_SHOW_ATTRIBUTE(devices);
DEFINE_SHOW_ATTRIBUTE(perf_state);

static void genpd_debug_add(struct generic_pm_domain *genpd)
{
	struct dentry *d;

	if (!genpd_debugfs_dir)
		return;

	d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);

	debugfs_create_file("current_state", 0444,
			    d, genpd, &status_fops);
	debugfs_create_file("sub_domains", 0444,
			    d, genpd, &sub_domains_fops);
	debugfs_create_file("idle_states", 0444,
			    d, genpd, &idle_states_fops);
	debugfs_create_file("active_time", 0444,
			    d, genpd, &active_time_fops);
	debugfs_create_file("total_idle_time", 0444,
			    d, genpd, &total_idle_time_fops);
	debugfs_create_file("devices", 0444,
			    d, genpd, &devices_fops);
	if (genpd->set_performance_state)
		debugfs_create_file("perf_state", 0444,
				    d, genpd, &perf_state_fops);
}
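
/*
 * The resulting debugfs layout (assuming debugfs is mounted at
 * /sys/kernel/debug) is one directory per registered domain, plus the
 * global summary created by genpd_debug_init() below:
 *
 *	/sys/kernel/debug/pm_genpd/pm_genpd_summary
 *	/sys/kernel/debug/pm_genpd/<domain>/current_state
 *	/sys/kernel/debug/pm_genpd/<domain>/sub_domains
 *	/sys/kernel/debug/pm_genpd/<domain>/idle_states
 *	/sys/kernel/debug/pm_genpd/<domain>/active_time
 *	/sys/kernel/debug/pm_genpd/<domain>/total_idle_time
 *	/sys/kernel/debug/pm_genpd/<domain>/devices
 *	/sys/kernel/debug/pm_genpd/<domain>/perf_state	(if supported)
 */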

static int __init genpd_debug_init(void)
{
	struct generic_pm_domain *genpd;

	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

	debugfs_create_file("pm_genpd_summary", 0444, genpd_debugfs_dir,
			    NULL, &summary_fops);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_debug_add(genpd);

	return 0;
}
late_initcall(genpd_debug_init);

static void __exit genpd_debug_exit(void)
{
	debugfs_remove_recursive(genpd_debugfs_dir);
}
__exitcall(genpd_debug_exit);
#endif /* CONFIG_DEBUG_FS */
3266