/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * The driver model core calls device_pm_add() when a device is registered
 * during device_add() and device_pm_remove() when the device is removed
 * from the device hierarchy.
 *
 * A separate list is used for keeping track of power info, because the
 * power domain dependencies may differ from the ancestral dependencies
 * that the subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpuidle.h>
#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held, we must
 * never try to acquire a device lock while holding dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

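/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */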
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

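/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */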
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			 dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

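/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */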
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert it before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert it after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of active devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error, pm_message_t state, char *info)
{
	ktime_t rettime;
	s64 nsecs;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	if (pm_print_times_enabled) {
		/* The shift by 10 approximates a division by 1000 (ns -> us). */
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)nsecs >> 10);
	}

	trace_device_pm_report_time(dev, info, nsecs, pm_verb(state.event),
				    error);
}

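/**
 * dpm_wait - Wait for a PM operation (on another device) to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */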
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

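/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */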
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif
	}

	return NULL;
}

/**
 * pm_late_early_op - Return the "late suspend"/"early resume" PM operation
 *	appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the "noirq" form of given PM operation.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

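/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */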
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 Out:
	TRACE_RESUME(error);
	return error;
}

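/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */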
static void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error) {
			suspend_stats.failed_resume_noirq++;
			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " noirq", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	cpuidle_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_early(dev, state);
		if (error) {
			suspend_stats.failed_resume_early++;
			dpm_save_failed_step(SUSPEND_RESUME_EARLY);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " early", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

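/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */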
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * Clear is_prepared so that new children may be added below a
	 * resumed device, even though it has not been through ->complete()
	 * yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

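/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */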
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	/* Kick off asynchronous resumes first so they can run in parallel. */
	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	/* Drop the runtime PM reference taken in device_prepare(). */
	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices that have been prepared
 * for this transition.
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

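/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */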
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return 0;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq"
 * suspend callbacks.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	cpuidle_pause();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "noirq");
	return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	__pm_runtime_disable(dev, false);

	if (dev->power.syscore)
		return 0;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_late++;
			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_early(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");

	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string describing the callback, used in diagnostics.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}

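/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */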
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	/*
	 * If the device is configured for system wakeup and a runtime resume
	 * request was pending for it, treat that as a wakeup event, so the
	 * system suspend will be aborted by the pm_wakeup_pending() check
	 * below.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend,
						"legacy class ");
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (dev->power.wakeup_path
		    && dev->parent && !dev->parent->power.ignore_children)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);

 Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}

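/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of
 * the device may be registered after this function has returned.
 */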
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int error = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * Block runtime suspend of the device here, during the prepare phase,
	 * and allow it again during the complete phase (device_complete()
	 * does the matching pm_runtime_put()).  Otherwise the device's parent
	 * could go into runtime suspend at the wrong time and the device
	 * could not be resumed when needed.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback) {
		error = callback(dev);
		suspend_report_result(callback, error);
	}

	device_unlock(dev);

	return error;
}

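/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */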
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute
 * "suspend" callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);