/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <trace/events/rpm.h>
#include "power.h"

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	unsigned long delta;

	delta = now - dev->power.accounting_timestamp;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = ACCESS_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around. */

	/*
	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
	 * up to the nearest second.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies(expires);
	expires += !expires;
	if (elapsed >= expires - last_busy)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
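
/*
 * Illustrative usage sketch (not part of this file; the "foo" driver and its
 * helpers are hypothetical): a driver typically refreshes power.last_busy at
 * the end of an I/O burst and then drops its usage count, letting the
 * machinery above compute when the autosuspend timer should fire.
 *
 *	static void foo_io_done(struct device *dev)
 *	{
 *		pm_runtime_mark_last_busy(dev);
 *		pm_runtime_put_autosuspend(dev);
 *	}
 */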

static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}

/*
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * The function should only be called by block device or network
 * device drivers for solving the deadlock problem during runtime
 * resume/suspend:
 *
 *     If memory allocation with GFP_KERNEL happens inside the runtime
 *     resume/suspend callback of any one of the device's ancestors (or
 *     the block device itself), a deadlock may be triggered inside the
 *     memory allocation since it might not complete until the block
 *     device becomes active and the involved page I/O finishes.  The
 *     situation was pointed out first by Alan Stern.  Network devices
 *     are involved in iSCSI kinds of situations.
 *
 * dev_hotplug_mutex is held in the function for handling the hotplug
 * race, because pm_runtime_set_memalloc_noio() may be called in an
 * async probe().
 *
 * The function should be called between device_add() and device_del().
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* hold power lock since bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * The ancestors' flags need not be touched if this device's
		 * flag was already set: they were set when the flag was
		 * first enabled on one of the descendants.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * Clear the flag of a parent device only if none of its
		 * children still has the flag set, because an ancestor's
		 * flag may have been set on behalf of any descendant.
		 */
		if (!dev || (!enable &&
			     device_for_each_child(dev, NULL,
						   dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
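
/*
 * Illustrative usage sketch (not part of this file; "foo" is a hypothetical
 * block or network driver): the flag is set once the device has been added
 * and cleared again before it goes away, per the rule in the comment above.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		...
 *		pm_runtime_set_memalloc_noio(dev, true);
 *		return 0;
 *	}
 *
 *	static void foo_remove(struct device *dev)
 *	{
 *		pm_runtime_set_memalloc_noio(dev, false);
 *		...
 *	}
 */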

/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_read_value(dev) < 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	if (dev->power.irq_safe)
		spin_unlock(&dev->power.lock);
	else
		spin_unlock_irq(&dev->power.lock);

	retval = cb(dev);

	if (dev->power.irq_safe)
		spin_lock(&dev->power.lock);
	else
		spin_lock_irq(&dev->power.lock);

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request;
 * otherwise run the ->runtime_idle() callback directly.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks)
		goto out;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		trace_rpm_return_int(dev, _THIS_IP_, 0);
		return 0;
	}

	dev->power.idle_notification = true;

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_idle;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_idle;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_idle;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_idle;
	else
		callback = NULL;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->runtime_idle;

	if (callback)
		retval = __rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);
	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}
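
/*
 * Illustrative sketch (not part of this file; the "foo" names are
 * hypothetical): a driver-level ->runtime_idle() callback picked up by the
 * lookup above usually either vetoes the idle transition with -EBUSY or
 * returns 0, in which case rpm_idle() proceeds to rpm_suspend() with
 * RPM_AUTO, as the return statement above shows.
 *
 *	static int foo_runtime_idle(struct device *dev)
 *	{
 *		struct foo *priv = dev_get_drvdata(dev);
 *
 *		return foo_still_busy(priv) ? -EBUSY : 0;
 *	}
 */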

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * Deadlock might be caused if memory allocation with
		 * GFP_KERNEL happens inside the runtime_suspend and
		 * runtime_resume callbacks of one block device's
		 * ancestor or the block device itself.  A network
		 * device may be part of an iSCSI block device, so a
		 * network device and its ancestors should be marked
		 * memalloc_noio too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}

/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  When
 * ->runtime_suspend() succeeded, if a deferred resume was requested while
 * the callback was running then carry it out, otherwise send an idle
 * notification for the device's parent (if the suspend succeeded and both
 * ignore_children of parent->power and irq_safe of dev->power are not set).
 * If ->runtime_suspend() failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
			    TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_suspend;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_suspend;
	else
		callback = NULL;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->runtime_suspend;

	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;

 fail:
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	if (retval == -EAGAIN || retval == -EBUSY) {
		dev->power.runtime_error = 0;

		/*
		 * If the callback routine failed an autosuspend, and
		 * if the last_busy time has been updated so that there
		 * is a new autosuspend expiration time, automatically
		 * reschedule another autosuspend.
		 */
		if ((rpmflags & RPM_AUTO) &&
		    pm_runtime_autosuspend_expiration(dev) != 0)
			goto repeat;
	} else {
		pm_runtime_cancel_pending(dev);
	}
	goto out;
}
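
/*
 * Illustrative sketch (not part of this file; the "foo" names are
 * hypothetical): a ->runtime_suspend() callback invoked through
 * rpm_callback() above typically saves context and cuts power; returning
 * -EBUSY or -EAGAIN lands in the fail: path, which may reschedule an
 * autosuspend attempt as described there.
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		struct foo *priv = dev_get_drvdata(dev);
 *
 *		if (foo_transfer_in_flight(priv))
 *			return -EBUSY;
 *		foo_save_context(priv);
 *		foo_power_off(priv);
 *		return 0;
 *	}
 */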

/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on
 * the RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend
 * running in parallel with this function, either tell the other process to
 * resume after suspending (deferred resume) or wait for it to finish.  If the
 * RPM_ASYNC flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
	    && dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
			    TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the parent
		 * is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * Resume the parent if it has runtime PM enabled and has not
		 * been set to ignore its children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.runtime_resume;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->runtime_resume;
	else
		callback = NULL;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->runtime_resume;

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}
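
/*
 * Illustrative sketch (not part of this file; the "foo" names are
 * hypothetical): the matching ->runtime_resume() callback restores what the
 * suspend callback tore down.  A nonzero return value propagates through
 * rpm_callback() above and is recorded in power.runtime_error.
 *
 *	static int foo_runtime_resume(struct device *dev)
 *	{
 *		struct foo *priv = dev_get_drvdata(dev);
 *		int ret;
 *
 *		ret = foo_power_on(priv);
 *		if (ret)
 *			return ret;
 *		foo_restore_context(priv);
 *		return 0;
 *	}
 */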

/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine
 * what is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	/* Avoid a zero timer_expires, which means "no timer armed". */
	dev->power.timer_expires += !dev->power.timer_expires;
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
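
/*
 * Illustrative usage sketch (not part of this file; "foo" is hypothetical):
 * a driver may schedule a suspend attempt some time after its last I/O
 * rather than suspending inline; the usage count must already be zero or
 * rpm_check_suspend_allowed() above will return -EAGAIN.
 *
 *	static void foo_io_complete(struct device *dev)
 *	{
 *		pm_runtime_put_noidle(dev);
 *		pm_schedule_suspend(dev, 500);
 *	}
 */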

/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
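
/*
 * Illustrative usage sketch (not part of this file; "foo" is hypothetical):
 * the usual driver pattern built on the three entry points above brackets
 * hardware access with a get_sync/put pair.  pm_runtime_get_sync() is the
 * inline wrapper calling __pm_runtime_resume() with RPM_GET_PUT, and
 * pm_runtime_put() calls __pm_runtime_idle() with RPM_GET_PUT | RPM_ASYNC.
 *
 *	static int foo_do_io(struct device *dev)
 *	{
 *		int ret;
 *
 *		ret = pm_runtime_get_sync(dev);
 *		if (ret < 0) {
 *			pm_runtime_put_noidle(dev);
 *			return ret;
 *		}
 *		ret = foo_hw_transfer(dev);
 *		pm_runtime_put(dev);
 *		return ret;
 *	}
 */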

/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * different from zero, the status may be changed either to RPM_ACTIVE, or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error
 * field of the device and returns 0.  Otherwise, the power.runtime_error
 * field is left unchanged.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It always is possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
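
/*
 * Illustrative usage sketch (not part of this file; "foo" is hypothetical):
 * a probe() routine that leaves the hardware powered up declares that state
 * before enabling runtime PM, so the core's bookkeeping matches reality.
 * pm_runtime_set_active() is the inline wrapper that passes RPM_ACTIVE to
 * __pm_runtime_set_status().
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		foo_power_on(dev);
 *		pm_runtime_set_active(dev);
 *		pm_runtime_enable(dev);
 *		return 0;
 *	}
 */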

/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
			    TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and if there's a pending resume request for the device, wake the device up.
 * Next, make sure that all pending requests for the device have been flushed
 * from pm_wq and wait for all runtime PM operations involving the device in
 * progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);

/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and if it was zero previously,
 * cancel all pending runtime PM requests for the device and wait for all
 * operations in progress to complete.  The device can be either active or
 * suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);
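
/*
 * Illustrative usage sketch (not part of this file; "foo" is hypothetical):
 * a driver's remove() path pairs probe's pm_runtime_enable() with
 * pm_runtime_disable(), the inline wrapper that calls __pm_runtime_disable()
 * with check_resume set, so a pending resume request is serviced first.
 *
 *	static void foo_remove(struct device *dev)
 *	{
 *		pm_runtime_disable(dev);
 *		foo_power_off(dev);
 *	}
 */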

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);

/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending, so that an irq-safe device never has to
 * wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
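
/*
 * Illustrative sketch (not part of this file; "foo" is hypothetical): after
 * declaring itself irq-safe in probe(), a driver may issue synchronous
 * runtime PM calls from atomic context, since the wait loops in rpm_suspend()
 * and rpm_resume() then spin with cpu_relax() instead of sleeping.
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		pm_runtime_get_sync(dev);
 *		foo_handle_event(dev);
 *		pm_runtime_put(dev);
 *		return IRQ_HANDLED;
 *	}
 */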

/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend is allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
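
/*
 * Illustrative usage sketch (not part of this file; "foo" is hypothetical):
 * enabling autosuspend with a 2-second delay in probe().
 * pm_runtime_use_autosuspend() is the inline wrapper that calls
 * __pm_runtime_use_autosuspend() below with @use set; the matching put side
 * was sketched after pm_runtime_autosuspend_expiration() above.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		pm_runtime_set_autosuspend_delay(dev, 2000);
 *		pm_runtime_use_autosuspend(dev);
 *		pm_runtime_enable(dev);
 *		return 0;
 *	}
 */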

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value of the device's use_autosuspend flag.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);

/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
	if (dev->power.irq_safe && dev->parent)
		pm_runtime_put(dev->parent);
}