/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * This file is released under the GPLv2.
 */
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
	pm_callback_t cb;
	const struct dev_pm_ops *ops;

	if (dev->pm_domain)
		ops = &dev->pm_domain->ops;
	else if (dev->type && dev->type->pm)
		ops = dev->type->pm;
	else if (dev->class && dev->class->pm)
		ops = dev->class->pm;
	else if (dev->bus && dev->bus->pm)
		ops = dev->bus->pm;
	else
		ops = NULL;

	if (ops)
		cb = *(pm_callback_t *)((void *)ops + cb_offset);
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);

	return cb;
}

#define RPM_GET_CALLBACK(dev, callback) \
		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	unsigned long delta;

	delta = now - dev->power.accounting_timestamp;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = ACCESS_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;

	/*
	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
	 * up to the nearest second.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies(expires);
	expires += !expires;
	if (elapsed >= expires - last_busy)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
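
/*
 * Illustrative sketch (not part of this file): a driver's ->runtime_suspend()
 * callback may consult the expiration time to honor a still-pending
 * autosuspend delay.  foo_runtime_suspend() and foo_power_down() below are
 * hypothetical:
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		if (pm_runtime_autosuspend_expiration(dev))
 *			return -EBUSY;	(the delay has not expired yet)
 *		return foo_power_down(dev);
 *	}
 */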

static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}

/**
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block device, or network
 * device driver for solving the deadlock problem during runtime
 * resume/suspend:
 *
 *     If memory allocation with GFP_KERNEL is called inside runtime
 *     resume/suspend callback of any one of its ancestors (or the
 *     block device itself), the deadlock may be triggered inside the
 *     memory allocation since it might not complete until the block
 *     device becomes active and the involved page I/O finishes.
 *
 * The lock of dev_hotplug_mutex is held in the function for handling
 * the hotplug race because pm_runtime_set_memalloc_noio() may be
 * called in async probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected device (hotplug/unregister).
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* hold power lock since bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * not need to enable ancestors any more if the device
		 * has been enabled.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * clear flag of the parent device only if all the
		 * children don't set the flag because ancestor's
		 * flag was set by any one of the descendants.
		 */
		if (!dev || (!enable &&
			     device_for_each_child(dev, NULL,
						   dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_read_value(dev) < 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	if (dev->power.irq_safe)
		spin_unlock(&dev->power.lock);
	else
		spin_unlock_irq(&dev->power.lock);

	retval = cb(dev);

	if (dev->power.irq_safe)
		spin_lock(&dev->power.lock);
	else
		spin_lock_irq(&dev->power.lock);

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.  If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle_rcuidle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks)
		goto out;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
		return 0;
	}

	dev->power.idle_notification = true;

	callback = RPM_GET_CALLBACK(dev, runtime_idle);

	if (callback)
		retval = __rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * Deadlock might be caused if memory allocation with
		 * GFP_KERNEL happens inside runtime_suspend and
		 * runtime_resume callbacks of one block device's
		 * ancestor or the block device itself. Network
		 * device might be thought as part of iSCSI block
		 * device, so network device and its ancestor should
		 * be marked as memalloc_noio too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  When
 * ->runtime_suspend succeeded, if a deferred resume was requested while
 * the callback was running then carry it out, otherwise send an idle
 * notification for its parent (if the suspend succeeded and both
 * ignore_children of parent->power and irq_safe of dev->power are not set).
 * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend_rcuidle(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq(dev);
	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;

 fail:
	dev_pm_disable_wake_irq(dev);
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	if (retval == -EAGAIN || retval == -EBUSY) {
		dev->power.runtime_error = 0;

		/*
		 * If the callback routine failed an autosuspend, and
		 * if the last_busy time has been updated so that there
		 * is a new autosuspend expiration time, automatically
		 * reschedule another autosuspend.
		 */
		if ((rpmflags & RPM_AUTO) &&
		    pm_runtime_autosuspend_expiration(dev) != 0)
			goto repeat;
	} else {
		pm_runtime_cancel_pending(dev);
	}
	goto out;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on
 * the RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running
 * in parallel with this function, either tell the other process to resume
 * after suspending (deferred_resume) or wait for it to finish.  If the
 * RPM_ASYNC flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume_rcuidle(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
	    && dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * Resume the parent if it has runtime PM enabled and not been
		 * set to ignore its children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq(dev);
	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
		dev_pm_enable_wake_irq(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		pm_runtime_mark_last_busy(dev);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine
 * what is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	dev->power.timer_expires += !dev->power.timer_expires;
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
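
/*
 * Illustrative sketch (not part of this file): an interrupt handler or I/O
 * completion path can request a delayed suspend without blocking, e.g.:
 *
 *	pm_schedule_suspend(dev, 250);	(attempt the suspend in ~250 ms)
 */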

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
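
/*
 * For reference, the static inline helpers in include/linux/pm_runtime.h
 * map onto this entry point, e.g.:
 *
 *	pm_runtime_idle(dev)		-> __pm_runtime_idle(dev, 0)
 *	pm_request_idle(dev)		-> __pm_runtime_idle(dev, RPM_ASYNC)
 *	pm_runtime_put(dev)		-> __pm_runtime_idle(dev,
 *						RPM_GET_PUT | RPM_ASYNC)
 *	pm_runtime_put_sync(dev)	-> __pm_runtime_idle(dev, RPM_GET_PUT)
 */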

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
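
/*
 * For reference, the static inline helpers in include/linux/pm_runtime.h
 * map onto this entry point, e.g.:
 *
 *	pm_runtime_suspend(dev)		-> __pm_runtime_suspend(dev, 0)
 *	pm_runtime_autosuspend(dev)	-> __pm_runtime_suspend(dev, RPM_AUTO)
 *	pm_runtime_put_sync_suspend(dev) -> __pm_runtime_suspend(dev,
 *						RPM_GET_PUT)
 *	pm_runtime_put_autosuspend(dev)	-> __pm_runtime_suspend(dev,
 *						RPM_GET_PUT | RPM_ASYNC | RPM_AUTO)
 */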

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
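
/*
 * For reference, the static inline helpers in include/linux/pm_runtime.h
 * map onto this entry point, e.g.:
 *
 *	pm_runtime_resume(dev)		-> __pm_runtime_resume(dev, 0)
 *	pm_request_resume(dev)		-> __pm_runtime_resume(dev, RPM_ASYNC)
 *	pm_runtime_get(dev)		-> __pm_runtime_resume(dev,
 *						RPM_GET_PUT | RPM_ASYNC)
 *	pm_runtime_get_sync(dev)	-> __pm_runtime_resume(dev, RPM_GET_PUT)
 */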

/**
 * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
 * @dev: Device to handle.
 *
 * Return -EINVAL if runtime PM is disabled for the device.
 *
 * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
 * and the runtime PM usage counter is nonzero, increment the counter and
 * return 1.  Otherwise return 0 without changing the counter.
 */
int pm_runtime_get_if_in_use(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = dev->power.disable_depth > 0 ? -EINVAL :
		dev->power.runtime_status == RPM_ACTIVE
			&& atomic_inc_not_zero(&dev->power.usage_count);
	spin_unlock_irqrestore(&dev->power.lock, flags);
	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
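
/*
 * Illustrative sketch (not part of this file): callers use the conditional
 * reference to touch the hardware only when it is already powered up.
 * foo_poll_registers() below is hypothetical:
 *
 *	if (pm_runtime_get_if_in_use(dev) > 0) {
 *		foo_poll_registers(dev);
 *		pm_runtime_put(dev);
 *	}
 */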

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device whose status to set.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error
 * field of the device and returns 0.  Otherwise, -EINVAL is returned if the
 * status is invalid and -EAGAIN is returned if changing the status is not
 * allowed (runtime PM is enabled and power.runtime_error is not set).
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE) {
			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
				dev_name(dev),
				dev_name(parent));
			error = -EBUSY;
		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
			atomic_inc(&parent->power.child_count);
		}

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and if there's a pending resume request for the device, resume it before
 * cancelling all of the other requests and waiting for all runtime PM
 * operations involving the device in progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes must not be exposed
 * for it.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might
 * have to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend is allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value of the device's use_autosuspend flag.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
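
/*
 * Illustrative sketch (not part of this file): a typical probe-time setup
 * combining the autosuspend helpers above with a 2-second inactivity delay:
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);
 */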

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
void pm_runtime_reinit(struct device *dev)
{
	if (!pm_runtime_enabled(dev)) {
		if (dev->power.runtime_status == RPM_ACTIVE)
			pm_runtime_set_suspended(dev);
		if (dev->power.irq_safe) {
			spin_lock_irq(&dev->power.lock);
			dev->power.irq_safe = 0;
			spin_unlock_irq(&dev->power.lock);
			if (dev->parent)
				pm_runtime_put(dev->parent);
		}
	}
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);
	pm_runtime_reinit(dev);
}

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we safely can check the device's runtime PM status
 * and if it is active, invoke its ->runtime_suspend callback to bring it into
 * suspend state.  Keep runtime PM disabled to preserve the state unless we
 * encounter errors.
 *
 * Typically this function may be invoked from a system suspend callback to
 * make sure the device is put into low power state.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	pm_runtime_disable(dev);
	if (pm_runtime_status_suspended(dev))
		return 0;

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	if (!callback) {
		ret = -ENOSYS;
		goto err;
	}

	ret = callback(dev);
	if (ret)
		goto err;

	pm_runtime_set_suspended(dev);
	return 0;
err:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

/**
 * pm_runtime_force_resume - Force a device into resume state if needed.
 * @dev: Device to resume.
 *
 * Prior to invoking this function we expect the user to have brought the
 * device into low power state by a call to pm_runtime_force_suspend().  Here
 * we reverse that, by forcing the device into full power state.
 *
 * Typically this function may be invoked from a system resume callback to
 * make sure the device is brought back to full power state.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	if (!callback) {
		ret = -ENOSYS;
		goto out;
	}

	if (!pm_runtime_status_suspended(dev))
		goto out;

	ret = pm_runtime_set_active(dev);
	if (ret)
		goto out;

	ret = callback(dev);
	if (ret) {
		pm_runtime_set_suspended(dev);
		goto out;
	}

	pm_runtime_mark_last_busy(dev);
out:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
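
/*
 * Illustrative sketch (not part of this file): drivers that reuse their
 * runtime PM callbacks for system sleep typically wire up this pair as
 * follows (foo_runtime_suspend() and foo_runtime_resume() are hypothetical):
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend,
 *				   foo_runtime_resume, NULL)
 *	};
 */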