/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * Devices are added to dpm_list in registration order, which guarantees
 * that children follow their parents.  Suspend walks the list in reverse
 * and resume walks it forward, moving each device onto a per-phase list
 * (prepared, suspended, late/early, noirq) as it completes a transition.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */
LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
                         dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("PM: Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
}

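/*
 * Timing note (informational, not from the original comments): the pr_info
 * tracing in initcall_debug_start() and initcall_debug_report() below is
 * gated by pm_print_times_enabled, which is normally toggled through the
 * /sys/power/pm_print_times attribute.
 */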
static ktime_t initcall_debug_start(struct device *dev)
{
        ktime_t calltime = ktime_set(0, 0);

        if (pm_print_times_enabled) {
                pr_info("calling %s+ @ %i, parent: %s\n",
                        dev_name(dev), task_pid_nr(current),
                        dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

        return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  int error)
{
        ktime_t delta, rettime;

        if (pm_print_times_enabled) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                        error, (unsigned long long)ktime_to_ns(delta) >> 10);
        }
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend;
        case PM_EVENT_RESUME:
                return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw;
        case PM_EVENT_RESTORE:
                return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}
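
/*
 * Illustrative example (not part of the original file): for a hypothetical
 * driver declaring
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *	};
 *
 * pm_op(&foo_pm_ops, PMSG_SUSPEND) returns foo_suspend() and
 * pm_op(&foo_pm_ops, PMSG_RESUME) returns foo_resume(); the macro also
 * reuses the same pair for the hibernation events handled above.  The
 * foo_* names are placeholders.
 */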

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Selects the "late" suspend and "early" resume flavors of the callbacks.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
                                      pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_late;
        case PM_EVENT_RESUME:
                return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_late;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_late;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_early;
        case PM_EVENT_RESTORE:
                return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops,
                                 pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_noirq;
        case PM_EVENT_RESUME:
                return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_noirq;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_noirq;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_noirq;
        case PM_EVENT_RESTORE:
                return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                       int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
               dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
        if (usecs == 0)
                usecs = 1;
        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
                info ?: "", info ? " " : "", pm_verb(state.event),
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
                            pm_message_t state, char *info)
{
        ktime_t calltime;
        int error;

        if (!cb)
                return 0;

        calltime = initcall_debug_start(dev);

        pm_dev_dbg(dev, state, info);
        error = cb(dev);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Out;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 Out:
        TRACE_RESUME(error);
        return error;
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_noirq_list)) {
                struct device *dev = to_device(dpm_noirq_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_late_early_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_noirq(dev, state);
                if (error) {
                        suspend_stats.failed_resume_noirq++;
                        dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " noirq", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "noirq");
        resume_device_irqs();
        cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Out;

        if (dev->pm_domain) {
                info = "early power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "early type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "early class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "early bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "early driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 Out:
        TRACE_RESUME(error);

        pm_runtime_enable(dev);
        return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_early(dev, state);
                if (error) {
                        suspend_stats.failed_resume_early++;
                        dpm_save_failed_step(SUSPEND_RESUME_EARLY);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " early", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
        dpm_resume_noirq(state);
        dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Complete;

        dpm_wait(dev->parent, async);
        device_lock(dev);

        /*
         * Mark the device as no longer prepared: from this point on it is
         * legitimate for new children to be registered below it again.
         */
        dev->power.is_prepared = false;

        if (!dev->power.is_suspended)
                goto Unlock;

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Driver;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Driver;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Driver;
                } else if (dev->class->resume) {
                        info = "legacy class ";
                        callback = dev->class->resume;
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        info = "legacy bus ";
                        callback = dev->bus->resume;
                        goto End;
                }
        }

 Driver:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

 End:
        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_suspended = false;

 Unlock:
        device_unlock(dev);

 Complete:
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);

        return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

/* Use async suspend/resume only if enabled globally, per device, and not tracing. */
static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
                INIT_COMPLETION(dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume, dev);
                }
        }

        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
                get_device(dev);
                if (!is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume++;
                                dpm_save_failed_step(SUSPEND_RESUME);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, "", error);
                        }

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);

        cpufreq_resume();
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        void (*callback)(struct device *) = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "completing power domain ";
                callback = dev->pm_domain->ops.complete;
        } else if (dev->type && dev->type->pm) {
                info = "completing type ";
                callback = dev->type->pm->complete;
        } else if (dev->class && dev->class->pm) {
                info = "completing class ";
                callback = dev->class->pm->complete;
        } else if (dev->bus && dev->bus->pm) {
                info = "completing bus ";
                callback = dev->bus->pm->complete;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "completing driver ";
                callback = dev->driver->pm->complete;
        }

        if (callback) {
                pm_dev_dbg(dev, state, info);
                callback(dev);
        }

        device_unlock(dev);

        pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
        struct list_head list;

        might_sleep();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);

                device_complete(dev, state);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);

        /* Allow device probing and trigger re-probing of deferred devices. */
        device_unblock_probing();
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);

/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.  For example, a failed PMSG_SUSPEND transition is unwound with
 * PMSG_RESUME, while a failed PMSG_FREEZE is unwound with PMSG_RECOVER.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return 0;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq"
 * suspend handlers.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        cpuidle_pause();
        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_noirq(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " noirq", error);
                        suspend_stats.failed_suspend_noirq++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);

                if (pm_wakeup_pending()) {
                        error = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_noirq(resume_event(state));
        else
                dpm_show_time(starttime, state, "noirq");
        return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

        __pm_runtime_disable(dev, false);

        if (dev->power.syscore)
                return 0;

        if (dev->pm_domain) {
                info = "late power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "late type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "late class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "late bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "late driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_late(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        suspend_stats.failed_suspend_late++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_late_early_list);
                put_device(dev);

                if (pm_wakeup_pending()) {
                        error = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_early(resume_event(state));
        else
                dpm_show_time(starttime, state, "late");

        return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
        int error = dpm_suspend_late(state);
        if (error)
                return error;

        error = dpm_suspend_noirq(state);
        if (error) {
                dpm_resume_early(resume_event(state));
                return error;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
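
/*
 * Rough usage sketch (informational, not part of this file): the system
 * suspend core in kernel/power/suspend.c pairs these entry points roughly
 * like this:
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	if (!error) {
 *		error = dpm_suspend_end(PMSG_SUSPEND);
 *		... enter the sleep state ...
 *		dpm_resume_start(PMSG_RESUME);
 *	}
 *	dpm_resume_end(PMSG_RESUME);
 */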

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state))
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev, state);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        dpm_wait_for_children(dev, async);

        if (async_error)
                goto Complete;

        /*
         * If a device configured to wake up the system from sleep states
         * has been suspended at run time and there's a resume request pending
         * for it, this is equivalent to the device signaling wakeup, so the
         * system suspend operation should be aborted.
         */
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore)
                goto Complete;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Run;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Run;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Run;
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_suspend(dev, state, dev->class->suspend);
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy bus ");
                        error = legacy_suspend(dev, state, dev->bus->suspend);
                        goto End;
                }
        }

 Run:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 End:
        if (!error) {
                dev->power.is_suspended = true;
                if (dev->power.wakeup_path
                    && dev->parent && !dev->parent->power.ignore_children)
                        dev->parent->power.wakeup_path = true;
        }

        device_unlock(dev);

 Complete:
        complete_all(&dev->power.completion);
        if (error)
                async_error = error;

        return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
        INIT_COMPLETION(dev->power.completion);

        if (pm_async_enabled && dev->power.async_suspend) {
                get_device(dev);
                async_schedule(async_suspend, dev);
                return 0;
        }

        return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        might_sleep();

        cpufreq_suspend();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend++;
                dpm_save_failed_step(SUSPEND_SUSPEND);
        } else
                dpm_show_time(starttime, state, NULL);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of
 * the device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int (*callback)(struct device *) = NULL;
        char *info = NULL;
        int error = 0;

        if (dev->power.syscore)
                return 0;

        /*
         * If a device's parent goes into runtime suspend at the wrong time,
         * it won't be possible to resume the device.  To prevent this we
         * block runtime suspend here, during the prepare phase, and allow
         * it again during the complete phase.
         */
        pm_runtime_get_noresume(dev);

        device_lock(dev);

        dev->power.wakeup_path = device_may_wakeup(dev);

        if (dev->pm_domain) {
                info = "preparing power domain ";
                callback = dev->pm_domain->ops.prepare;
        } else if (dev->type && dev->type->pm) {
                info = "preparing type ";
                callback = dev->type->pm->prepare;
        } else if (dev->class && dev->class->pm) {
                info = "preparing class ";
                callback = dev->class->pm->prepare;
        } else if (dev->bus && dev->bus->pm) {
                info = "preparing bus ";
                callback = dev->bus->pm->prepare;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "preparing driver ";
                callback = dev->driver->pm->prepare;
        }

        if (callback) {
                error = callback(dev);
                suspend_report_result(callback, error);
        }

        device_unlock(dev);

        return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
        int error = 0;

        might_sleep();

        /*
         * Give known devices a chance to complete their probes before we
         * disable probing; this synchronization matters at least at boot
         * time and during hibernation restore.
         */
        wait_for_device_probe();

        /*
         * Probing new devices during suspend or hibernation is unsafe and
         * would make system behavior unpredictable, so block probing here
         * and defer probes instead.  Normal behavior is restored in
         * dpm_complete().
         */
        device_block_probing();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_prepare(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        printk(KERN_INFO "PM: Device %s not prepared "
                               "for power transition: code %d\n",
                               dev_name(dev), error);
                        put_device(dev);
                        break;
                }
                dev->power.is_prepared = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        int error;

        error = dpm_prepare(state);
        if (error) {
                suspend_stats.failed_prepare++;
                dpm_save_failed_step(SUSPEND_PREPARE);
        } else
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
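
/*
 * Note (informational): callers normally reach __suspend_report_result()
 * through the suspend_report_result(fn, ret) macro from <linux/pm.h>,
 * which supplies the calling function's name via __func__.
 */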

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
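
/*
 * Illustrative example (not part of the original file): a driver whose
 * device depends on another, non-parent device ("foo->helper" below is a
 * hypothetical field) can enforce ordering from its own callback:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, foo->helper);
 *		if (error)
 *			return error;
 *		...
 *	}
 */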

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
        struct device *dev;

        if (!fn)
                return;

        device_pm_lock();
        list_for_each_entry(dev, &dpm_list, power.entry)
                fn(dev, data);
        device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
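
/*
 * Illustrative example (not part of the original file): logging every
 * device known to the PM core, with foo_print_one() as a hypothetical
 * callback:
 *
 *	static void foo_print_one(struct device *dev, void *data)
 *	{
 *		dev_info(dev, "on dpm_list\n");
 *	}
 *
 *	dpm_for_each_dev(NULL, foo_print_one);
 *
 * dpm_list_mtx is held for the duration of the walk, so @fn must not add
 * or remove devices.
 */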