/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * Look up the runtime PM callback at @cb_offset within struct dev_pm_ops,
 * preferring the PM domain, then the device type, class and bus.  If none
 * of those provides a set of PM operations, fall back to the driver's
 * dev_pm_ops.
 */
static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
	pm_callback_t cb;
	const struct dev_pm_ops *ops;

	if (dev->pm_domain)
		ops = &dev->pm_domain->ops;
	else if (dev->type && dev->type->pm)
		ops = dev->type->pm;
	else if (dev->class && dev->class->pm)
		ops = dev->class->pm;
	else if (dev->bus && dev->bus->pm)
		ops = dev->bus->pm;
	else
		ops = NULL;

	if (ops)
		cb = *(pm_callback_t *)((void *)ops + cb_offset);
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);

	return cb;
}

#define RPM_GET_CALLBACK(dev, callback) \
		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	unsigned long delta;

	delta = now - dev->power.accounting_timestamp;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = ACCESS_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around. */

	/*
	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
	 * up to the nearest second.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies(expires);
	expires += !expires;	/* Avoid 0, which means "no expiration". */
	if (elapsed >= expires - last_busy)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);

static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}

/*
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block device or network device
 * drivers, to solve a deadlock problem during runtime resume/suspend:
 *
 *     If memory allocation with GFP_KERNEL happens inside the runtime
 *     resume/suspend callback of any one of the device's ancestors (or
 *     the block device itself), a deadlock may be triggered inside the
 *     memory allocation, since it might not complete until the block
 *     device becomes active and the involved page I/O finishes.  The
 *     situation was pointed out first by Alan Stern.  Network devices
 *     are involved in iSCSI kinds of situations.
 *
 * dev_hotplug_mutex is held in the function to handle a hotplug race,
 * because pm_runtime_set_memalloc_noio() may be called in async probe().
 *
 * The function should be called between device_add() and device_del().
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* hold power lock since bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * No need to enable the ancestors any more if the device
		 * has already been enabled.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * Clear the flag of the parent device only if all of its
		 * children have the flag cleared, because an ancestor's
		 * flag may have been set by any one of the descendants.
		 */
		if (!dev || (!enable &&
			     device_for_each_child(dev, NULL,
						   dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
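
/*
 * Illustrative sketch (not part of this file; the "foo" names below are
 * hypothetical): a block or network device driver would typically mark its
 * device path around probe and remove:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		...
 *		pm_runtime_set_memalloc_noio(&pdev->dev, true);
 *		return 0;
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		pm_runtime_set_memalloc_noio(&pdev->dev, false);
 *		...
 *		return 0;
 *	}
 */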

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_read_value(dev) < 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	if (dev->power.irq_safe)
		spin_unlock(&dev->power.lock);
	else
		spin_unlock_irq(&dev->power.lock);

	retval = cb(dev);

	if (dev->power.irq_safe)
		spin_lock(&dev->power.lock);
	else
		spin_lock_irq(&dev->power.lock);

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.  If the callback returns 0, a
 * suspend of the device (with the RPM_AUTO flag set) is attempted.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence; the device can't suspend.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks)
		goto out;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		trace_rpm_return_int(dev, _THIS_IP_, 0);
		return 0;
	}

	dev->power.idle_notification = true;

	callback = RPM_GET_CALLBACK(dev, runtime_idle);

	if (callback)
		retval = __rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);
	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * Deadlock might be caused if memory allocation with
		 * GFP_KERNEL happens inside the runtime_suspend and
		 * runtime_resume callbacks of one block device's
		 * ancestor or the block device itself.  A network
		 * device might be thought of as part of an iSCSI block
		 * device, so a network device and its ancestors should
		 * be marked as memalloc_noio too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  When
 * ->runtime_suspend succeeded, if a deferred resume was requested while
 * the callback was running then carry it out, otherwise send an idle
 * notification for the device's parent (if the suspend succeeded and both
 * ignore_children of parent->power and irq_safe of dev->power are not set).
 * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
			    TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq(dev);
	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;

 fail:
	dev_pm_disable_wake_irq(dev);
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	if (retval == -EAGAIN || retval == -EBUSY) {
		dev->power.runtime_error = 0;

		/*
		 * If the callback routine failed an autosuspend, and
		 * if the last_busy time has been updated so that there
		 * is a new autosuspend expiration time, automatically
		 * reschedule another autosuspend.
		 */
		if ((rpmflags & RPM_AUTO) &&
		    pm_runtime_autosuspend_expiration(dev) != 0)
			goto repeat;
	} else {
		pm_runtime_cancel_pending(dev);
	}
	goto out;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on
 * the RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running
 * in parallel with this function, either tell the other process to resume
 * after suspending (deferred resume) or wait for it to finish.  If the
 * RPM_ASYNC flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
	    && dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
			    TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Invoke the parent's callback without holding
		 * the device's lock.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's runtime PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq(dev);
	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
		dev_pm_enable_wake_irq(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		pm_runtime_mark_last_busy(dev);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine
 * what is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	dev->power.timer_expires += !dev->power.timer_expires;
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
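
/*
 * Illustrative sketch (hypothetical driver code): instead of suspending
 * synchronously, a driver may ask for a suspend attempt half a second
 * from now:
 *
 *	if (pm_schedule_suspend(dev, 500) < 0)
 *		dev_dbg(dev, "suspend request not queued\n");
 */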

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called for @dev.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called for @dev.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called for @dev.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
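
/*
 * Note: drivers normally reach the three entry points above through the
 * static inline helpers in include/linux/pm_runtime.h.  For example,
 * pm_runtime_get_sync(dev) is __pm_runtime_resume(dev, RPM_GET_PUT), and
 * pm_runtime_put(dev) is __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC).
 * An illustrative (hypothetical) I/O path would be:
 *
 *	retval = pm_runtime_get_sync(dev);
 *	if (retval < 0) {
 *		pm_runtime_put_noidle(dev);
 *		return retval;
 *	}
 *	... access the hardware ...
 *	pm_runtime_put(dev);
 */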

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the device's
 * power.runtime_error field and returns 0.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and if there's a pending resume request for the device, run it before
 * returning control to the caller.
 *
 * This routine cancels all of the device's pending requests and waits for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);
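
/*
 * Illustrative sketch (hypothetical probe code): a driver whose hardware is
 * already powered up at probe time usually reports that state before
 * enabling runtime PM, and disables it again on the remove path:
 *
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 *	...
 *	pm_runtime_disable(dev);
 */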

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle.
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
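
/*
 * Illustrative sketch (hypothetical probe code): a driver whose runtime PM
 * callbacks may safely run in atomic context declares that once, before
 * enabling runtime PM:
 *
 *	pm_runtime_irq_safe(dev);
 *	pm_runtime_enable(dev);
 */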

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend won't be prevented. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
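
/*
 * Illustrative sketch (hypothetical driver code): a typical autosuspend
 * setup in probe, paired with mark-busy/put in the I/O path:
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 *
 *	... and in the I/O path ...
 *	pm_runtime_get_sync(dev);
 *	... talk to the hardware ...
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 */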

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
	if (dev->power.irq_safe && dev->parent)
		pm_runtime_put(dev->parent);
}

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we safely can check the device's runtime PM status
 * and if it is active, invoke its ->runtime_suspend callback to bring it into
 * suspend state.  Keep runtime PM disabled to preserve the state unless we
 * encounter errors.
 *
 * Typically this function may be invoked from a system suspend callback to
 * make sure the device is put into low power state.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	pm_runtime_disable(dev);
	if (pm_runtime_status_suspended(dev))
		return 0;

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	if (!callback) {
		ret = -ENOSYS;
		goto err;
	}

	ret = callback(dev);
	if (ret)
		goto err;

	pm_runtime_set_suspended(dev);
	return 0;
err:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

/**
 * pm_runtime_force_resume - Force a device into resume state.
 * @dev: Device to resume.
 *
 * Prior to invoking this function we expect the user to have brought the
 * device into low power state by a call to pm_runtime_force_suspend().  Here
 * we reverse that, by forcing the device into full power state via its
 * ->runtime_resume callback and re-enabling runtime PM on the way out.
 *
 * Typically this function may be invoked from a system resume callback.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	if (!callback) {
		ret = -ENOSYS;
		goto out;
	}

	ret = callback(dev);
	if (ret)
		goto out;

	pm_runtime_set_active(dev);
	pm_runtime_mark_last_busy(dev);
out:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
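
/*
 * Illustrative sketch (hypothetical dev_pm_ops): a driver whose runtime PM
 * callbacks also put the device into a state suitable for system sleep can
 * reuse them for the system-wide transitions:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend,
 *				   foo_runtime_resume, NULL)
 *	};
 */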