/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched/mm.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
	pm_callback_t cb;
	const struct dev_pm_ops *ops;

	if (dev->pm_domain)
		ops = &dev->pm_domain->ops;
	else if (dev->type && dev->type->pm)
		ops = dev->type->pm;
	else if (dev->class && dev->class->pm)
		ops = dev->class->pm;
	else if (dev->bus && dev->bus->pm)
		ops = dev->bus->pm;
	else
		ops = NULL;

	if (ops)
		cb = *(pm_callback_t *)((void *)ops + cb_offset);
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);

	return cb;
}

#define RPM_GET_CALLBACK(dev, callback) \
		__rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states.
 * @dev: Device to update the accounting for.
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.  This function must be
 * called before the runtime_status field is updated, so that the time spent
 * in the previous state is accounted for correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	unsigned long delta;

	delta = now - dev->power.accounting_timestamp;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/*
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = READ_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around. */

	/*
	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
	 * up to the nearest second.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies(expires);
	expires += !expires;
	if (elapsed >= expires - last_busy)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);

static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}

/**
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block device, or network
 * device driver for solving the deadlock problem during runtime
 * resume/suspend:
 *
 *     If memory allocation with GFP_KERNEL is called inside runtime
 *     resume/suspend callback of any one of its ancestors (or the
 *     block device itself), the deadlock may be triggered inside the
 *     memory allocation since it might not complete until the block
 *     device becomes active and the involved page I/O finishes.
 *
 * The dev_hotplug_mutex is held in the function for handling hotplug
 * races, because pm_runtime_set_memalloc_noio() may be called in
 * async probe().
 *
 * The function should be called between device_add() and device_del().
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* hold power lock since bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * not need to enable ancestors any more if the device
		 * has been enabled.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * clear flag of the parent device only if all the
		 * children don't set the flag because ancestor's
		 * flag was set by any one of the descendants.
		 */
		if (!dev || (!enable &&
			     device_for_each_child(dev, NULL,
						   dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!dev->power.ignore_children &&
			atomic_read(&dev->power.child_count))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_read_value(dev) == 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

static int rpm_get_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		int retval;

		if (!(link->flags & DL_FLAG_PM_RUNTIME))
			continue;

		if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND ||
		    link->rpm_active)
			continue;

		retval = pm_runtime_get_sync(link->supplier);
		/* Ignore suppliers with disabled runtime PM. */
		if (retval < 0 && retval != -EACCES) {
			pm_runtime_put_noidle(link->supplier);
			return retval;
		}
		link->rpm_active = true;
	}
	return 0;
}

static void rpm_put_suppliers(struct device *dev)
{
	struct device_link *link;

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (link->rpm_active &&
		    READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) {
			pm_runtime_put(link->supplier);
			link->rpm_active = false;
		}
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval, idx;
	bool use_links = dev->power.links_count > 0;

	if (dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);
	} else {
		spin_unlock_irq(&dev->power.lock);

		/*
		 * Resume suppliers if necessary.
		 *
		 * The device's runtime PM status cannot change until this
		 * routine returns, so it is safe to read the status outside of
		 * the lock.
		 */
		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
			idx = device_links_read_lock();

			retval = rpm_get_suppliers(dev);
			if (retval)
				goto fail;

			device_links_read_unlock(idx);
		}
	}

	retval = cb(dev);

	if (dev->power.irq_safe) {
		spin_lock(&dev->power.lock);
	} else {
		/*
		 * If the device is suspending and the callback has returned
		 * success, drop the usage counters of the suppliers that have
		 * been reference counted on its resume.
		 *
		 * Do that if resume fails too.
		 */
		if (use_links
		    && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
			idx = device_links_read_lock();

 fail:
			rpm_put_suppliers(dev);

			device_links_read_unlock(idx);
		}

		spin_lock_irq(&dev->power.lock);
	}

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.  If the ->runtime_idle callback
 * doesn't exist or if it returned 0, call rpm_suspend with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle_rcuidle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks)
		goto out;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
		return 0;
	}

	dev->power.idle_notification = true;

	callback = RPM_GET_CALLBACK(dev, runtime_idle);

	if (callback)
		retval = __rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * Deadlock might be caused if memory allocation with
		 * GFP_KERNEL happens inside runtime_suspend and
		 * runtime_resume callbacks of one block device's
		 * ancestor or the block device itself. Network
		 * device might be thought as part of iSCSI block
		 * device, so network device and its ancestor should
		 * be marked as memalloc_noio too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  When
 * ->runtime_suspend succeeded, if a deferred resume was requested while
 * the callback was running then carry it out, otherwise send an idle
 * notification for its parent (if the suspend succeeded and both
 * ignore_children of parent->power and irq_safe of dev->power are not set).
 * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend_rcuidle(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	dev_pm_enable_wake_irq_check(dev, true);
	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;

 fail:
	dev_pm_disable_wake_irq_check(dev);
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	if (retval == -EAGAIN || retval == -EBUSY) {
		dev->power.runtime_error = 0;

		/*
		 * If the callback routine failed an autosuspend, and
		 * if the last_busy time has been updated so that there
		 * is a new autosuspend expiration time, automatically
		 * reschedule another autosuspend.
		 */
		if ((rpmflags & RPM_AUTO) &&
		    pm_runtime_autosuspend_expiration(dev) != 0)
			goto repeat;
	} else {
		pm_runtime_cancel_pending(dev);
	}
	goto out;
}

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume_rcuidle(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
	    && dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * Resume the parent if it has runtime PM enabled and not been
		 * set to ignore its children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	dev_pm_disable_wake_irq_check(dev);
	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
		dev_pm_enable_wake_irq_check(dev, false);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		pm_runtime_mark_last_busy(dev);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);

	return retval;
}

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine
 * what is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @t: The timer that fired, embedded in a struct dev_pm_info.
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(struct timer_list *t)
{
	struct device *dev = from_timer(dev, t, power.suspend_timer);
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	dev->power.timer_expires += !dev->power.timer_expires;
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called for @dev.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called for @dev.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called for @dev.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
			dev->power.runtime_status != RPM_ACTIVE);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);

/**
 * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
 * @dev: Device to handle.
 *
 * Return -EINVAL if runtime PM is disabled for the device.
 *
 * If that's not the case and if the device's runtime PM status is RPM_ACTIVE
 * and the runtime PM usage counter is nonzero, increment the counter and
 * return 1.  Otherwise return 0 without changing the counter.
 */
int pm_runtime_get_if_in_use(struct device *dev)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = dev->power.disable_depth > 0 ? -EINVAL :
		dev->power.runtime_status == RPM_ACTIVE
			&& atomic_inc_not_zero(&dev->power.usage_count);
	spin_unlock_irqrestore(&dev->power.lock, flags);
	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error
 * field of the device and returns 0.  Otherwise, a negative error code is
 * returned.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status || !parent)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		atomic_add_unless(&parent->power.child_count, -1, 0);
		notify_parent = !parent->power.ignore_children;
	} else {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE) {
			dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
				dev_name(dev),
				dev_name(parent));
			error = -EBUSY;
		} else if (dev->power.runtime_status == RPM_SUSPENDED) {
			atomic_inc(&parent->power.child_count);
		}

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	WARN(!dev->power.disable_depth &&
	     dev->power.runtime_status == RPM_SUSPENDED &&
	     !dev->power.ignore_children &&
	     atomic_read(&dev->power.child_count) > 0,
	     "Enabling runtime PM for inactive device (%s) with active children\n",
	     dev_name(dev));

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO | RPM_ASYNC);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend is allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	timer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, 0);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
 * @dev: Device object to re-initialize.
 */
void pm_runtime_reinit(struct device *dev)
{
	if (!pm_runtime_enabled(dev)) {
		if (dev->power.runtime_status == RPM_ACTIVE)
			pm_runtime_set_suspended(dev);
		if (dev->power.irq_safe) {
			spin_lock_irq(&dev->power.lock);
			dev->power.irq_safe = 0;
			spin_unlock_irq(&dev->power.lock);
			if (dev->parent)
				pm_runtime_put(dev->parent);
		}
	}
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);
	pm_runtime_reinit(dev);
}

/**
 * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
 * @dev: Device whose driver is going to be removed.
 *
 * Check links from this device to any consumers and if any of them have active
 * runtime PM references to the device, drop the usage counter of the device
 * (once per link).
 *
 * Links with the DL_FLAG_STATELESS flag set are ignored.
 */
void pm_runtime_clean_up_links(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
		if (link->flags & DL_FLAG_STATELESS)
			continue;

		if (link->rpm_active) {
			pm_runtime_put_noidle(dev);
			link->rpm_active = false;
		}
	}

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_get_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (link->flags & DL_FLAG_PM_RUNTIME)
			pm_runtime_get_sync(link->supplier);

	device_links_read_unlock(idx);
}

/**
 * pm_runtime_put_suppliers - Drop references to supplier devices.
 * @dev: Consumer device.
 */
void pm_runtime_put_suppliers(struct device *dev)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (link->flags & DL_FLAG_PM_RUNTIME)
			pm_runtime_put(link->supplier);

	device_links_read_unlock(idx);
}

void pm_runtime_new_link(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.links_count++;
	spin_unlock_irq(&dev->power.lock);
}

void pm_runtime_drop_link(struct device *dev)
{
	rpm_put_suppliers(dev);

	spin_lock_irq(&dev->power.lock);
	WARN_ON(dev->power.links_count == 0);
	dev->power.links_count--;
	spin_unlock_irq(&dev->power.lock);
}

static bool pm_runtime_need_not_resume(struct device *dev)
{
	return atomic_read(&dev->power.usage_count) <= 1 &&
		(atomic_read(&dev->power.child_count) == 0 ||
		 dev->power.ignore_children);
}

/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we safely can check the device's runtime PM status and
 * if it is active, invoke its ->runtime_suspend callback to suspend it and
 * change its runtime PM status field to RPM_SUSPENDED.  Also, if the device's
 * usage and children counters don't indicate that the device was in use before
 * the system-wide transition under way, decrement its parent's children
 * counter (if there is a parent).  Keep runtime PM disabled to preserve the
 * state unless we encounter errors.
 *
 * Typically this function may be invoked from a system suspend callback to
 * make sure the device is put into low power state and it should only be used
 * during system-wide PM transitions to sleep states.  It assumes that the
 * analogous pm_runtime_force_resume() will be used to resume the device.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret;

	pm_runtime_disable(dev);
	if (pm_runtime_status_suspended(dev))
		return 0;

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

	ret = callback ? callback(dev) : 0;
	if (ret)
		goto err;

	/*
	 * If the device can stay in suspend after the system-wide transition
	 * to the working state that will follow, drop the children counter of
	 * its parent, but set its status to RPM_SUSPENDED anyway in case this
	 * function will be called again for it in the meantime.
	 */
	if (pm_runtime_need_not_resume(dev))
		pm_runtime_set_suspended(dev);
	else
		__update_runtime_status(dev, RPM_SUSPENDED);

	return 0;

err:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

/**
 * pm_runtime_force_resume - Force a device into resume state if needed.
 * @dev: Device to resume.
 *
 * Prior invoking this function we expect the user to have brought the device
 * into low power state by a call to pm_runtime_force_suspend().  Here we
 * reverse that, by forcing the device into the active state and re-enabling
 * runtime PM, unless the device has been left in runtime suspend because it
 * does not need to be resumed.
 *
 * Typically this function may be invoked from a system resume callback.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
		goto out;

	/*
	 * The value of the parent's children counter is correct already, so
	 * just update the status of the device.
	 */
	__update_runtime_status(dev, RPM_ACTIVE);

	callback = RPM_GET_CALLBACK(dev, runtime_resume);

	ret = callback ? callback(dev) : 0;
	if (ret) {
		pm_runtime_set_suspended(dev);
		goto out;
	}

	pm_runtime_mark_last_busy(dev);
out:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);