1
2
3
4
5
6
7
8
9#include <linux/sched.h>
10#include <linux/pm_runtime.h>
11#include <linux/jiffies.h>
12
13static int __pm_runtime_resume(struct device *dev, bool from_wq);
14static int __pm_request_idle(struct device *dev);
15static int __pm_request_resume(struct device *dev);
16
17
18
19
20
21static void pm_runtime_deactivate_timer(struct device *dev)
22{
23 if (dev->power.timer_expires > 0) {
24 del_timer(&dev->power.suspend_timer);
25 dev->power.timer_expires = 0;
26 }
27}
28
29
30
31
32
33static void pm_runtime_cancel_pending(struct device *dev)
34{
35 pm_runtime_deactivate_timer(dev);
36
37
38
39
40 dev->power.request = RPM_REQ_NONE;
41}
42
43
44
45
46
47
48
/**
 * __pm_runtime_idle - Notify the device's bus type that it may be suspended.
 * @dev: Device to notify the bus type about.
 *
 * Check that an idle notification is allowed (no runtime error, no other
 * notification in progress, usage count zero, runtime PM enabled, status
 * RPM_ACTIVE, all children suspended) and, if so, run the bus type's
 * ->runtime_idle() callback.
 *
 * Must be called under dev->power.lock with interrupts disabled; the lock is
 * dropped while the callback runs and reacquired afterwards, with
 * power.idle_notification set across the callback to serialize notifiers.
 */
static int __pm_runtime_idle(struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0
	    || dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	if (dev->power.request_pending) {
		/*
		 * If an idle notification is pending, cancel it.  Any other
		 * pending request takes precedence over us.
		 */
		if (dev->power.request == RPM_REQ_IDLE) {
			dev->power.request = RPM_REQ_NONE;
		} else if (dev->power.request != RPM_REQ_NONE) {
			retval = -EAGAIN;
			goto out;
		}
	}

	dev->power.idle_notification = true;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) {
		/* Callback runs without the lock; it may sleep. */
		spin_unlock_irq(&dev->power.lock);

		dev->bus->pm->runtime_idle(dev);

		spin_lock_irq(&dev->power.lock);
	}

	dev->power.idle_notification = false;
	/* Wake anyone in __pm_runtime_barrier() waiting for us to finish. */
	wake_up_all(&dev->power.wait_queue);

 out:
	return retval;
}
96
97
98
99
100
101int pm_runtime_idle(struct device *dev)
102{
103 int retval;
104
105 spin_lock_irq(&dev->power.lock);
106 retval = __pm_runtime_idle(dev);
107 spin_unlock_irq(&dev->power.lock);
108
109 return retval;
110}
111EXPORT_SYMBOL_GPL(pm_runtime_idle);
112
113
114
115
116
117
118
119
120
121
122
123
124
/**
 * __pm_runtime_suspend - Carry out run-time suspend of given device.
 * @dev: Device to suspend.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be suspended and run the ->runtime_suspend()
 * callback provided by its bus type.  If another suspend has been started
 * earlier, either return immediately (when called from the workqueue) or wait
 * for it to finish and retry.  Any pending idle notification or scheduled
 * suspend request is cancelled first.  If a resume was requested while the
 * callback ran (power.deferred_resume), carry it out afterwards.
 *
 * Must be called under dev->power.lock with interrupts disabled; the lock is
 * dropped and reacquired around the bus callback and while sleeping.
 */
int __pm_runtime_suspend(struct device *dev, bool from_wq)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	bool notify = false;
	int retval = 0;

	dev_dbg(dev, "__pm_runtime_suspend()%s!\n",
		from_wq ? " from workqueue" : "");

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	/* Pending resume requests take precedence over us. */
	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		retval = -EAGAIN;
		goto out;
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;
	else if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.disable_depth > 0
	    || atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval)
		goto out;

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		/* From the workqueue we must not sleep waiting. */
		if (from_wq) {
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	dev->power.runtime_status = RPM_SUSPENDING;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
		/* Callback runs without the lock; it may sleep. */
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_suspend(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		/* Suspend failed: the device stays active. */
		dev->power.runtime_status = RPM_ACTIVE;
		pm_runtime_cancel_pending(dev);
		dev->power.deferred_resume = false;

		/* -EAGAIN/-EBUSY mean "try later", not a hard error. */
		if (retval == -EAGAIN || retval == -EBUSY) {
			notify = true;
			dev->power.runtime_error = 0;
		}
	} else {
		dev->power.runtime_status = RPM_SUSPENDED;

		/* One fewer active child for the parent. */
		if (dev->parent) {
			parent = dev->parent;
			atomic_add_unless(&parent->power.child_count, -1, 0);
		}
	}
	wake_up_all(&dev->power.wait_queue);

	/* Carry out a resume that was requested while we were suspending. */
	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		__pm_runtime_resume(dev, false);
		retval = -EAGAIN;
		goto out;
	}

	if (notify)
		__pm_runtime_idle(dev);

	/* The parent may now be able to suspend as well. */
	if (parent && !parent->power.ignore_children) {
		spin_unlock_irq(&dev->power.lock);

		pm_request_idle(parent);

		spin_lock_irq(&dev->power.lock);
	}

 out:
	dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval);

	return retval;
}
242
243
244
245
246
247int pm_runtime_suspend(struct device *dev)
248{
249 int retval;
250
251 spin_lock_irq(&dev->power.lock);
252 retval = __pm_runtime_suspend(dev, false);
253 spin_unlock_irq(&dev->power.lock);
254
255 return retval;
256}
257EXPORT_SYMBOL_GPL(pm_runtime_suspend);
258
259
260
261
262
263
264
265
266
267
268
269
270
271
/**
 * __pm_runtime_resume - Carry out run-time resume of given device.
 * @dev: Device to resume.
 * @from_wq: If set, the function has been called via pm_wq.
 *
 * Check if the device can be woken up and run the ->runtime_resume() callback
 * provided by its bus type.  If another resume has been started earlier,
 * either return immediately (when called from the workqueue) or wait for it
 * to finish.  If a suspend is running in parallel, either set
 * power.deferred_resume (workqueue case) or wait for the suspend to complete
 * and retry.  The device's parent is resumed first, if necessary.
 *
 * Must be called under dev->power.lock with interrupts disabled; the lock is
 * dropped and reacquired around the bus callback, the parent handling and
 * while sleeping.
 */
int __pm_runtime_resume(struct device *dev, bool from_wq)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	struct device *parent = NULL;
	int retval = 0;

	dev_dbg(dev, "__pm_runtime_resume()%s!\n",
		from_wq ? " from workqueue" : "");

 repeat:
	if (dev->power.runtime_error) {
		retval = -EINVAL;
		goto out;
	}

	/* Any pending request or scheduled suspend is superseded by us. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EAGAIN;
	if (retval)
		goto out;

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (from_wq) {
			/*
			 * Can't sleep on the workqueue: ask the suspend in
			 * progress to resume the device once it is done.
			 */
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			retval = -EINPROGRESS;
			goto out;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  The reference taken here is dropped at "out"
		 * below via pm_runtime_put().
		 */
		parent = dev->parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * Resume the parent if it has run-time PM enabled and has not
		 * been set to ignore its children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			__pm_runtime_resume(parent, false);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		/* Re-check our own state; it may have changed meanwhile. */
		goto repeat;
	}

	dev->power.runtime_status = RPM_RESUMING;

	if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) {
		/* Callback runs without the lock; it may sleep. */
		spin_unlock_irq(&dev->power.lock);

		retval = dev->bus->pm->runtime_resume(dev);

		spin_lock_irq(&dev->power.lock);
		dev->power.runtime_error = retval;
	} else {
		retval = -ENOSYS;
	}

	if (retval) {
		/* Resume failed: fall back to the suspended state. */
		dev->power.runtime_status = RPM_SUSPENDED;
		pm_runtime_cancel_pending(dev);
	} else {
		dev->power.runtime_status = RPM_ACTIVE;
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (!retval)
		__pm_request_idle(dev);

 out:
	if (parent) {
		/* Drop the reference taken on the parent above. */
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval);

	return retval;
}
393
394
395
396
397
398int pm_runtime_resume(struct device *dev)
399{
400 int retval;
401
402 spin_lock_irq(&dev->power.lock);
403 retval = __pm_runtime_resume(dev, false);
404 spin_unlock_irq(&dev->power.lock);
405
406 return retval;
407}
408EXPORT_SYMBOL_GPL(pm_runtime_resume);
409
410
411
412
413
414
415
416
417static void pm_runtime_work(struct work_struct *work)
418{
419 struct device *dev = container_of(work, struct device, power.work);
420 enum rpm_request req;
421
422 spin_lock_irq(&dev->power.lock);
423
424 if (!dev->power.request_pending)
425 goto out;
426
427 req = dev->power.request;
428 dev->power.request = RPM_REQ_NONE;
429 dev->power.request_pending = false;
430
431 switch (req) {
432 case RPM_REQ_NONE:
433 break;
434 case RPM_REQ_IDLE:
435 __pm_runtime_idle(dev);
436 break;
437 case RPM_REQ_SUSPEND:
438 __pm_runtime_suspend(dev, true);
439 break;
440 case RPM_REQ_RESUME:
441 __pm_runtime_resume(dev, true);
442 break;
443 }
444
445 out:
446 spin_unlock_irq(&dev->power.lock);
447}
448
449
450
451
452
453
454
455
456
457
458static int __pm_request_idle(struct device *dev)
459{
460 int retval = 0;
461
462 if (dev->power.runtime_error)
463 retval = -EINVAL;
464 else if (atomic_read(&dev->power.usage_count) > 0
465 || dev->power.disable_depth > 0
466 || dev->power.runtime_status == RPM_SUSPENDED
467 || dev->power.runtime_status == RPM_SUSPENDING)
468 retval = -EAGAIN;
469 else if (!pm_children_suspended(dev))
470 retval = -EBUSY;
471 if (retval)
472 return retval;
473
474 if (dev->power.request_pending) {
475
476 if (dev->power.request == RPM_REQ_NONE)
477 dev->power.request = RPM_REQ_IDLE;
478 else if (dev->power.request != RPM_REQ_IDLE)
479 retval = -EAGAIN;
480 return retval;
481 }
482
483 dev->power.request = RPM_REQ_IDLE;
484 dev->power.request_pending = true;
485 queue_work(pm_wq, &dev->power.work);
486
487 return retval;
488}
489
490
491
492
493
494int pm_request_idle(struct device *dev)
495{
496 unsigned long flags;
497 int retval;
498
499 spin_lock_irqsave(&dev->power.lock, flags);
500 retval = __pm_request_idle(dev);
501 spin_unlock_irqrestore(&dev->power.lock, flags);
502
503 return retval;
504}
505EXPORT_SYMBOL_GPL(pm_request_idle);
506
507
508
509
510
511
512
/*
 * __pm_request_suspend - Submit a suspend request for given device.
 * @dev: Device to suspend.
 *
 * Queue a work item on pm_wq that will call __pm_runtime_suspend(), unless
 * the device is busy or already suspended (retval 1).  Note that retval == 1
 * deliberately falls through the "retval < 0" check so that a stale queued
 * request can still be fixed up below.
 *
 * Must be called under dev->power.lock with interrupts disabled.
 */
static int __pm_request_suspend(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		return -EINVAL;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;	/* nothing to do, but see below */
	else if (atomic_read(&dev->power.usage_count) > 0
	    || dev->power.disable_depth > 0)
		retval = -EAGAIN;
	else if (dev->power.runtime_status == RPM_SUSPENDING)
		retval = -EINPROGRESS;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;
	if (retval < 0)
		return retval;

	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/*
		 * Pending resume requests take precedence over us, but we can
		 * overtake any other pending request.  If the device is
		 * already suspended (retval == 1), the queued request is
		 * cancelled instead of being turned into a suspend.
		 */
		if (dev->power.request == RPM_REQ_RESUME)
			retval = -EAGAIN;
		else if (dev->power.request != RPM_REQ_SUSPEND)
			dev->power.request = retval ?
			    RPM_REQ_NONE : RPM_REQ_SUSPEND;
		return retval;
	} else if (retval) {
		return retval;
	}

	dev->power.request = RPM_REQ_SUSPEND;
	dev->power.request_pending = true;
	queue_work(pm_wq, &dev->power.work);

	return 0;
}
555
556
557
558
559
560
561
562static void pm_suspend_timer_fn(unsigned long data)
563{
564 struct device *dev = (struct device *)data;
565 unsigned long flags;
566 unsigned long expires;
567
568 spin_lock_irqsave(&dev->power.lock, flags);
569
570 expires = dev->power.timer_expires;
571
572 if (expires > 0 && !time_after(expires, jiffies)) {
573 dev->power.timer_expires = 0;
574 __pm_request_suspend(dev);
575 }
576
577 spin_unlock_irqrestore(&dev->power.lock, flags);
578}
579
580
581
582
583
584
585int pm_schedule_suspend(struct device *dev, unsigned int delay)
586{
587 unsigned long flags;
588 int retval = 0;
589
590 spin_lock_irqsave(&dev->power.lock, flags);
591
592 if (dev->power.runtime_error) {
593 retval = -EINVAL;
594 goto out;
595 }
596
597 if (!delay) {
598 retval = __pm_request_suspend(dev);
599 goto out;
600 }
601
602 pm_runtime_deactivate_timer(dev);
603
604 if (dev->power.request_pending) {
605
606
607
608
609 if (dev->power.request == RPM_REQ_RESUME) {
610 retval = -EAGAIN;
611 goto out;
612 }
613 dev->power.request = RPM_REQ_NONE;
614 }
615
616 if (dev->power.runtime_status == RPM_SUSPENDED)
617 retval = 1;
618 else if (dev->power.runtime_status == RPM_SUSPENDING)
619 retval = -EINPROGRESS;
620 else if (atomic_read(&dev->power.usage_count) > 0
621 || dev->power.disable_depth > 0)
622 retval = -EAGAIN;
623 else if (!pm_children_suspended(dev))
624 retval = -EBUSY;
625 if (retval)
626 goto out;
627
628 dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
629 mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
630
631 out:
632 spin_unlock_irqrestore(&dev->power.lock, flags);
633
634 return retval;
635}
636EXPORT_SYMBOL_GPL(pm_schedule_suspend);
637
638
639
640
641
642
643
644static int __pm_request_resume(struct device *dev)
645{
646 int retval = 0;
647
648 if (dev->power.runtime_error)
649 return -EINVAL;
650
651 if (dev->power.runtime_status == RPM_ACTIVE)
652 retval = 1;
653 else if (dev->power.runtime_status == RPM_RESUMING)
654 retval = -EINPROGRESS;
655 else if (dev->power.disable_depth > 0)
656 retval = -EAGAIN;
657 if (retval < 0)
658 return retval;
659
660 pm_runtime_deactivate_timer(dev);
661
662 if (dev->power.request_pending) {
663
664 dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME;
665 return retval;
666 } else if (retval) {
667 return retval;
668 }
669
670 dev->power.request = RPM_REQ_RESUME;
671 dev->power.request_pending = true;
672 queue_work(pm_wq, &dev->power.work);
673
674 return retval;
675}
676
677
678
679
680
681int pm_request_resume(struct device *dev)
682{
683 unsigned long flags;
684 int retval;
685
686 spin_lock_irqsave(&dev->power.lock, flags);
687 retval = __pm_request_resume(dev);
688 spin_unlock_irqrestore(&dev->power.lock, flags);
689
690 return retval;
691}
692EXPORT_SYMBOL_GPL(pm_request_resume);
693
694
695
696
697
698
699
700
701
702int __pm_runtime_get(struct device *dev, bool sync)
703{
704 int retval = 1;
705
706 if (atomic_add_return(1, &dev->power.usage_count) == 1)
707 retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);
708
709 return retval;
710}
711EXPORT_SYMBOL_GPL(__pm_runtime_get);
712
713
714
715
716
717
718
719
720
721
722int __pm_runtime_put(struct device *dev, bool sync)
723{
724 int retval = 0;
725
726 if (atomic_dec_and_test(&dev->power.usage_count))
727 retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev);
728
729 return retval;
730}
731EXPORT_SYMBOL_GPL(__pm_runtime_put);
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
/**
 * __pm_runtime_set_status - Set run-time PM status of a device.
 * @dev: Device to handle.
 * @status: New run-time PM status; only RPM_ACTIVE and RPM_SUSPENDED are
 *	accepted (-EINVAL otherwise).
 *
 * The status may only be changed while run-time PM of the device is disabled
 * or a runtime error has been recorded (-EAGAIN otherwise).  If the device
 * has a parent with run-time PM enabled and 'ignore_children' unset, the
 * device cannot be set RPM_ACTIVE unless the parent is active (-EBUSY in
 * that case).  The parent's child count is adjusted to match the status
 * change, and the parent is asked to idle when the device goes inactive.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/*
		 * The device is going inactive: drop the parent's child
		 * count and let the parent know it may be able to idle now.
		 */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock(&parent->power.lock);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active and does not ignore its children, unless the
		 * parent's run-time PM is disabled.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE) {
			error = -EBUSY;
		} else {
			if (dev->power.runtime_status == RPM_SUSPENDED)
				atomic_inc(&parent->power.child_count);
		}

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	dev->power.runtime_status = status;
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	/* Must be done after dropping the lock; pm_request_idle() takes it. */
	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
814
815
816
817
818
819
820
821
822
823
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending run-time PM requests for the device from pm_wq and wait
 * for any suspend, resume or idle notification in progress to complete.
 *
 * Must be called under dev->power.lock with interrupts disabled; the lock is
 * dropped and reacquired around cancel_work_sync() and while sleeping.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		/* Neutralize the request before flushing the work item. */
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, resume or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875int pm_runtime_barrier(struct device *dev)
876{
877 int retval = 0;
878
879 pm_runtime_get_noresume(dev);
880 spin_lock_irq(&dev->power.lock);
881
882 if (dev->power.request_pending
883 && dev->power.request == RPM_REQ_RESUME) {
884 __pm_runtime_resume(dev, false);
885 retval = 1;
886 }
887
888 __pm_runtime_barrier(dev);
889
890 spin_unlock_irq(&dev->power.lock);
891 pm_runtime_put_noidle(dev);
892
893 return retval;
894}
895EXPORT_SYMBOL_GPL(pm_runtime_barrier);
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
/**
 * __pm_runtime_disable - Disable run-time PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and, if it was zero
 * previously, cancel all pending run-time PM requests for the device and
 * wait for all operations in progress to complete.  The device can be either
 * active or suspended after its run-time PM has been disabled.
 *
 * If @check_resume is set and a resume request is pending when this is
 * called with power.disable_depth zero, the device is woken up first.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		/* Already disabled; just record one more level of nesting. */
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because
	 * that means there probably is some I/O to process and disabling
	 * run-time PM shouldn't prevent the device from processing it.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		__pm_runtime_resume(dev, false);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);
945
946
947
948
949
950void pm_runtime_enable(struct device *dev)
951{
952 unsigned long flags;
953
954 spin_lock_irqsave(&dev->power.lock, flags);
955
956 if (dev->power.disable_depth > 0)
957 dev->power.disable_depth--;
958 else
959 dev_warn(dev, "Unbalanced %s!\n", __func__);
960
961 spin_unlock_irqrestore(&dev->power.lock, flags);
962}
963EXPORT_SYMBOL_GPL(pm_runtime_enable);
964
965
966
967
968
969void pm_runtime_init(struct device *dev)
970{
971 spin_lock_init(&dev->power.lock);
972
973 dev->power.runtime_status = RPM_SUSPENDED;
974 dev->power.idle_notification = false;
975
976 dev->power.disable_depth = 1;
977 atomic_set(&dev->power.usage_count, 0);
978
979 dev->power.runtime_error = 0;
980
981 atomic_set(&dev->power.child_count, 0);
982 pm_suspend_ignore_children(dev, false);
983
984 dev->power.request_pending = false;
985 dev->power.request = RPM_REQ_NONE;
986 dev->power.deferred_resume = false;
987 INIT_WORK(&dev->power.work, pm_runtime_work);
988
989 dev->power.timer_expires = 0;
990 setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
991 (unsigned long)dev);
992
993 init_waitqueue_head(&dev->power.wait_queue);
994}
995
996
997
998
999
1000void pm_runtime_remove(struct device *dev)
1001{
1002 __pm_runtime_disable(dev, false);
1003
1004
1005 if (dev->power.runtime_status == RPM_ACTIVE)
1006 pm_runtime_set_suspended(dev);
1007}
1008