/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * The PM core keeps dpm_list in order of registration, so that devices can
 * be suspended in reverse order and resumed in registration order during
 * system-wide power transitions.
 */
#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <linux/cpuidle.h>
#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);
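
/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * one of those locks.
 */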
LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;
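
/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */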
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}
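
/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */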
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}
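
/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */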
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}
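
/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */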
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			 dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
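
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */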
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}
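
/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */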
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	list_move_tail(&deva->power.entry, &devb->power.entry);
}
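
/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */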
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	list_move(&deva->power.entry, &devb->power.entry);
}
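
/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */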
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}
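
/*
 * When the pm_print_times debug switch is set, log each device PM callback
 * invocation and how long it took.
 */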
static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (pm_print_times_enabled) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (pm_print_times_enabled) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}
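
/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */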
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
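
/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */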
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
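
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */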
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
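
/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */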
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops,
				 pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
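
/* Map a PM event code to a human-readable verb for diagnostics. */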
static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
		       int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
	       dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
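
/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */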
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 Out:
	TRACE_RESUME(error);
	return error;
}
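
/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */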
static void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error) {
			suspend_stats.failed_resume_noirq++;
			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " noirq", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
	cpuidle_resume();
}
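
/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */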
static int device_resume_early(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	return error;
}
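
/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */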
static void dpm_resume_early(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_early(dev, state);
		if (error) {
			suspend_stats.failed_resume_early++;
			dpm_save_failed_step(SUSPEND_RESUME_EARLY);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " early", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
}
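
/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */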
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
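
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */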
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	dpm_wait(dev->parent, async);
	device_lock(dev);
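
	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */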
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}
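
/* Async variant of device_resume(), scheduled via async_schedule(). */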
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
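
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */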
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}
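
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */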
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}
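
/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */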
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}
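
/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */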
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
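
/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */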
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}
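
/**
 * device_suspend_noirq - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */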
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return 0;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}
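
/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq"
 * suspend handlers.
 */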
static int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	cpuidle_pause();
	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "noirq");
	return error;
}
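
/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */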
static int device_suspend_late(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	__pm_runtime_disable(dev, false);

	if (dev->power.syscore)
		return 0;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}
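
/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */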
static int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_late++;
			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_early(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");

	return error;
}
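
/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */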
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
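
/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */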
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}
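
/**
 * __device_suspend - Execute a "suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */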
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;
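
	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */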
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (dev->power.wakeup_path
		    && dev->parent && !dev->parent->power.ignore_children)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);

 Complete:
	complete_all(&dev->power.completion);
	if (error)
		async_error = error;

	return error;
}
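
/* Async variant of __device_suspend(), scheduled via async_schedule(). */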
static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}
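
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */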
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}
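
/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of
 * the device may be registered after this function has returned.
 */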
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int error = 0;

	if (dev->power.syscore)
		return 0;
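
	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device.  To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */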
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback) {
		error = callback(dev);
		suspend_report_result(callback, error);
	}

	device_unlock(dev);

	return error;
}
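
/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */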
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared for power transition: code %d\n",
			       dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}
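
/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute
 * "suspend" callbacks for them.
 */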
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
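
/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */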
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
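
/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */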
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);