/*
 * Devices PM QoS constraints management
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This module exposes the interface to kernel space for specifying
 * per-device PM QoS dependencies. It provides infrastructure for registration
 * of:
 *
 * Dependents on a QoS value : register requests
 * Watchers of QoS value : get notified when target QoS value changes
 *
 * This QoS design is best effort based. Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
 * Watchers can register a per-device notification callback using the
 * dev_pm_qos_*_notifier API. The notification chain data is stored in the
 * per-device constraint data struct.
 *
 * Note about the per-device constraint data struct allocation:
 * . The per-device constraints data struct ptr is stored into the device
 *   dev_pm_info struct
 * . To minimize the data usage by the per-device constraints, the data struct
 *   is only allocated at the first call to dev_pm_qos_add_request
 * . The data is later free'd when the device is removed from the system
 * . A global mutex protects the constraints users from the data being
 *   allocated and free'd
 */
#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <trace/events/power.h>

#include "power.h"

static DEFINE_MUTEX(dev_pm_qos_mtx);
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);

static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);

/**
 * __dev_pm_qos_flags - Check PM QoS flags for a given device.
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 *
 * This routine must be called with dev->power.lock held.
 */
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
{
	struct dev_pm_qos *qos = dev->power.qos;
	struct pm_qos_flags *pqf;
	s32 val;

	lockdep_assert_held(&dev->power.lock);

	if (IS_ERR_OR_NULL(qos))
		return PM_QOS_FLAGS_UNDEFINED;

	pqf = &qos->flags;
	if (list_empty(&pqf->list))
		return PM_QOS_FLAGS_UNDEFINED;

	val = pqf->effective_flags & mask;
	if (val)
		return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;

	return PM_QOS_FLAGS_NONE;
}

/**
 * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 */
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
{
	unsigned long irqflags;
	enum pm_qos_flags_status ret;

	spin_lock_irqsave(&dev->power.lock, irqflags);
	ret = __dev_pm_qos_flags(dev, mask);
	spin_unlock_irqrestore(&dev->power.lock, irqflags);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
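
/*
 * For example (an illustrative sketch, not code from a particular caller;
 * "foo_dev" and "foo_power_off" are placeholder names), a bus driver could
 * check whether every request permits powering the device off:
 *
 *	if (dev_pm_qos_flags(foo_dev, PM_QOS_FLAG_NO_POWER_OFF)
 *			== PM_QOS_FLAGS_NONE)
 *		foo_power_off(foo_dev);
 */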

/**
 * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
 * @dev: Device to get the PM QoS constraint value for.
 *
 * This routine must be called with dev->power.lock held.
 */
s32 __dev_pm_qos_read_value(struct device *dev)
{
	lockdep_assert_held(&dev->power.lock);

	return IS_ERR_OR_NULL(dev->power.qos) ?
		0 : pm_qos_read_value(&dev->power.qos->resume_latency);
}

/**
 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
 * @dev: Device to get the PM QoS constraint value for.
 */
s32 dev_pm_qos_read_value(struct device *dev)
{
	unsigned long flags;
	s32 ret;

	spin_lock_irqsave(&dev->power.lock, flags);
	ret = __dev_pm_qos_read_value(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return ret;
}

/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and if needed call the per-device and the global notification
 * callbacks
 */
static int apply_constraint(struct dev_pm_qos_request *req,
			    enum pm_qos_req_action action, s32 value)
{
	struct dev_pm_qos *qos = req->dev->power.qos;
	int ret;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = pm_qos_update_target(&qos->resume_latency,
					   &req->data.pnode, action, value);
		if (ret) {
			value = pm_qos_read_value(&qos->resume_latency);
			blocking_notifier_call_chain(&dev_pm_notifiers,
						     (unsigned long)value,
						     req);
		}
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		ret = pm_qos_update_target(&qos->latency_tolerance,
					   &req->data.pnode, action, value);
		if (ret) {
			value = pm_qos_read_value(&qos->latency_tolerance);
			req->dev->power.set_latency_tolerance(req->dev, value);
		}
		break;
	case DEV_PM_QOS_FLAGS:
		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
					  action, value);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * dev_pm_qos_constraints_allocate
 * @dev: device to allocate data for
 *
 * Called at the first call to add_request, for constraint data allocation
 * Must be called with the dev_pm_qos_mtx mutex held
 */
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct pm_qos_constraints *c;
	struct blocking_notifier_head *n;

	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
	if (!qos)
		return -ENOMEM;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n) {
		kfree(qos);
		return -ENOMEM;
	}
	BLOCKING_INIT_NOTIFIER_HEAD(n);

	c = &qos->resume_latency;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->type = PM_QOS_MIN;
	c->notifiers = n;

	c = &qos->latency_tolerance;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;

	INIT_LIST_HEAD(&qos->flags.list);

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = qos;
	spin_unlock_irq(&dev->power.lock);

	return 0;
}

static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);

/**
 * dev_pm_qos_constraints_destroy
 * @dev: target device
 *
 * Called from the device PM subsystem on device removal under device_pm_lock().
 */
void dev_pm_qos_constraints_destroy(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct dev_pm_qos_request *req, *tmp;
	struct pm_qos_constraints *c;
	struct pm_qos_flags *f;

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	/*
	 * If the device's PM QoS resume latency limit or PM QoS flags have
	 * been exposed to user space, they have to be hidden at this point.
	 */
	pm_qos_sysfs_remove_resume_latency(dev);
	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);

	__dev_pm_qos_hide_latency_limit(dev);
	__dev_pm_qos_hide_flags(dev);

	qos = dev->power.qos;
	if (!qos)
		goto out;

	/* Flush the constraints lists for the device. */
	c = &qos->resume_latency;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		/*
		 * Update constraints list and call the notification
		 * callbacks if needed
		 */
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}
	c = &qos->latency_tolerance;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}
	f = &qos->flags;
	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = ERR_PTR(-ENODEV);
	spin_unlock_irq(&dev->power.lock);

	/*
	 * The notifier head was allocated for the resume_latency set only,
	 * and c points to the latency_tolerance set here, so free it via
	 * qos->resume_latency explicitly to avoid leaking it.
	 */
	kfree(qos->resume_latency.notifiers);
	kfree(qos);

 out:
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}

static bool dev_pm_qos_invalid_request(struct device *dev,
				       struct dev_pm_qos_request *req)
{
	return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE
			&& !dev->power.set_latency_tolerance);
}

static int __dev_pm_qos_add_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	int ret = 0;

	if (!dev || dev_pm_qos_invalid_request(dev, req))
		return -EINVAL;

	if (WARN(dev_pm_qos_request_active(req),
		 "%s() called for already added request\n", __func__))
		return -EINVAL;

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
	if (!ret) {
		req->dev = dev;
		req->type = type;
		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
	}
	return ret;
}

/**
 * dev_pm_qos_add_request - inserts new qos request into the list
 * @dev: target device for the constraint
 * @req: pointer to a preallocated handle
 * @type: type of the request
 * @value: defines the qos request
 *
 * This function inserts a new entry in the device constraints list of
 * requested qos performance characteristics. It recognizes that requests are
 * lists and handles list management. It will return -EINVAL in the case of
 * wrong parameters, -ENOMEM if there's insufficient memory to register the
 * new request or -ENODEV if the device has just been removed from the system.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_add_request(dev, req, type, value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
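
/*
 * Typical usage (an illustrative sketch, not code from this file; the "foo"
 * names are placeholders): a driver embeds a struct dev_pm_qos_request in
 * its private data and registers it once, e.g. at probe time:
 *
 *	struct foo_data *foo = dev_get_drvdata(dev);
 *	int ret;
 *
 *	ret = dev_pm_qos_add_request(dev, &foo->qos_req,
 *				     DEV_PM_QOS_RESUME_LATENCY, 100);
 *	if (ret < 0)
 *		return ret;
 */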

/*
 * __dev_pm_qos_update_request - modifies an existing qos request
 * @req: handle to list element holding a dev_pm_qos request to use
 * @new_value: defines the qos request
 *
 * Updates an existing dev PM qos request along with updating the
 * target value.  Must be called with dev_pm_qos_mtx held.
 */
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
				       s32 new_value)
{
	s32 curr_value;
	int ret = 0;

	if (!req)
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		curr_value = req->data.pnode.prio;
		break;
	case DEV_PM_QOS_FLAGS:
		curr_value = req->data.flr.flags;
		break;
	default:
		return -EINVAL;
	}

	trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
					new_value);
	if (curr_value != new_value)
		ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);

	return ret;
}

/**
 * dev_pm_qos_update_request - modifies an existing qos request
 * @req: handle to list element holding a dev_pm_qos request to use
 * @new_value: defines the qos request
 *
 * Updates an existing dev PM qos request along with updating the
 * target value.
 *
 * Attempts are made to make this code callable on hot code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_update_request(req, new_value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);

static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	if (!req)
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
					PM_QOS_DEFAULT_VALUE);
	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
	memset(req, 0, sizeof(*req));
	return ret;
}

/**
 * dev_pm_qos_remove_request - modifies an existing qos request
 * @req: handle to request list element
 *
 * Will remove the element.  After removal it is safe to reuse the handle
 * for another request.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_remove_request(req);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
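
/*
 * Continuing the sketch above: the request registered at probe time can be
 * retargeted while the device is in use and dropped on the teardown path:
 *
 *	dev_pm_qos_update_request(&foo->qos_req, 500);
 *	...
 *	dev_pm_qos_remove_request(&foo->qos_req);
 */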

/**
 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block managed by caller.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for the device.
 *
 * If the device's constraints object doesn't exist when this routine is
 * called, it will be created (or error code will be returned if that fails).
 */
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	if (!ret)
		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
						       notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
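
/*
 * A watcher might be set up as follows (illustrative sketch; the "foo"
 * callback and notifier block are placeholders).  The callback receives the
 * new aggregated resume latency target as the "value" argument:
 *
 *	static int foo_qos_notify(struct notifier_block *nb,
 *				  unsigned long value, void *data)
 *	{
 *		... react to the new target value ...
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_qos_notify,
 *	};
 *
 *	ret = dev_pm_qos_add_notifier(foo_dev, &foo_nb);
 */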

/**
 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block to be removed.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value.
 */
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier)
{
	int retval = 0;

	mutex_lock(&dev_pm_qos_mtx);

	/* Silently return if the constraints object is not present. */
	if (!IS_ERR_OR_NULL(dev->power.qos))
		retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
							    notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return retval;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);

/**
 * dev_pm_qos_add_global_notifier - sets notification entry for changes to
 * target value of the PM QoS constraints for any device
 *
 * @notifier: notifier block managed by caller.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for any device.
 */
int dev_pm_qos_add_global_notifier(struct notifier_block *notifier)
{
	return blocking_notifier_chain_register(&dev_pm_notifiers, notifier);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier);

/**
 * dev_pm_qos_remove_global_notifier - deletes notification for changes to
 * target value of PM QoS constraints for any device
 *
 * @notifier: notifier block to be removed.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value for any device.
 */
int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
{
	return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);

/**
 * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
 * @dev: Device whose ancestor to add the request for.
 * @req: Pointer to the preallocated handle.
 * @type: Type of the request.
 * @value: Constraint latency value.
 */
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	struct device *ancestor = dev->parent;
	int ret = -ENODEV;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		while (ancestor && !ancestor->power.ignore_children)
			ancestor = ancestor->parent;

		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		while (ancestor && !ancestor->power.set_latency_tolerance)
			ancestor = ancestor->parent;

		break;
	default:
		ancestor = NULL;
	}
	if (ancestor)
		ret = dev_pm_qos_add_request(ancestor, req, type, value);

	if (ret < 0)
		req->dev = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
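
/*
 * For example (sketch with placeholder names), a device whose parent
 * power-manages its children (power.ignore_children set) could constrain
 * that ancestor's resume latency instead of its own:
 *
 *	ret = dev_pm_qos_add_ancestor_request(dev, &foo->ancestor_req,
 *					      DEV_PM_QOS_RESUME_LATENCY, 50);
 */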

static void __dev_pm_qos_drop_user_request(struct device *dev,
					   enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos_request *req = NULL;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		req = dev->power.qos->resume_latency_req;
		dev->power.qos->resume_latency_req = NULL;
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		req = dev->power.qos->latency_tolerance_req;
		dev->power.qos->latency_tolerance_req = NULL;
		break;
	case DEV_PM_QOS_FLAGS:
		req = dev->power.qos->flags_req;
		dev->power.qos->flags_req = NULL;
		break;
	}
	__dev_pm_qos_remove_request(req);
	kfree(req);
}

static void dev_pm_qos_drop_user_request(struct device *dev,
					 enum dev_pm_qos_req_type type)
{
	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_drop_user_request(dev, type);
	mutex_unlock(&dev_pm_qos_mtx);
}

/**
 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
 * @value: Initial value of the latency limit.
 */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev) || value < 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->resume_latency_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->resume_latency_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_resume_latency(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
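
/*
 * A driver that wants user space to be able to cap its resume latency
 * would call this at probe time (sketch):
 *
 *	ret = dev_pm_qos_expose_latency_limit(dev, value);
 *
 * which creates a writable PM QoS resume latency attribute under the
 * device's sysfs power/ directory.
 */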

static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
}

/**
 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
 */
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_resume_latency(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_latency_limit(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);

/**
 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
 * @dev: Device whose PM QoS flags are to be exposed to user space.
 * @val: Initial values of the flags.
 */
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev))
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->flags_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->flags_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_flags(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);

static void __dev_pm_qos_hide_flags(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
}

/**
 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
 * @dev: Device whose PM QoS flags are to be hidden from user space.
 */
void dev_pm_qos_hide_flags(struct device *dev)
{
	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_flags(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);

/**
 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
 * @dev: Device to update the PM QoS flags request for.
 * @mask: Flags to set/clear.
 * @set: Whether to set or clear the flags (true means set).
 */
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
{
	s32 value;
	int ret;

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
		ret = -EINVAL;
		goto out;
	}

	value = dev_pm_qos_requested_flags(dev);
	if (set)
		value |= mask;
	else
		value &= ~mask;

	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	pm_runtime_put(dev);
	return ret;
}

/**
 * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
 * @dev: Device to obtain the user space latency tolerance for.
 */
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{
	s32 ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = IS_ERR_OR_NULL(dev->power.qos)
		|| !dev->power.qos->latency_tolerance_req ?
			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
			dev->power.qos->latency_tolerance_req->data.pnode.prio;
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}

/**
 * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
 * @dev: Device to update the user space latency tolerance for.
 * @val: New user space latency tolerance for @dev (negative values disable).
 */
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos)
	    || !dev->power.qos->latency_tolerance_req) {
		struct dev_pm_qos_request *req;

		if (val < 0) {
			ret = -EINVAL;
			goto out;
		}
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out;
		}
		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
		if (ret < 0) {
			kfree(req);
			goto out;
		}
		dev->power.qos->latency_tolerance_req = req;
	} else {
		if (val < 0) {
			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
			ret = 0;
		} else {
			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
		}
	}

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}