/*
 * Devices PM QoS constraints management
 *
 * Per-device PM QoS constraints are kept in the struct dev_pm_qos object
 * pointed to by dev->power.qos.  Three kinds of constraints are handled
 * here:
 *
 *  - resume latency (DEV_PM_QOS_RESUME_LATENCY): an upper bound on the
 *    time the device may take to become operational again, aggregated
 *    with PM_QOS_MIN,
 *  - latency tolerance (DEV_PM_QOS_LATENCY_TOLERANCE): forwarded to the
 *    device's ->set_latency_tolerance() callback whenever the aggregate
 *    value changes,
 *  - flags (DEV_PM_QOS_FLAGS): a bitmask of per-device PM QoS flags.
 *
 * Requests are added, updated and removed through the dev_pm_qos_*
 * interface below.  Every change recomputes the aggregate ("effective")
 * value and, for resume latency, notifies the registered notifier chain.
 * Selected requests can additionally be exposed to user space via sysfs.
 */
#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <trace/events/power.h>

#include "power.h"

static DEFINE_MUTEX(dev_pm_qos_mtx);
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);

/**
 * __dev_pm_qos_flags - Check PM QoS flags for a given device.
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 *
 * This routine must be called with dev->power.lock held.
 */
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
{
	struct dev_pm_qos *qos = dev->power.qos;
	struct pm_qos_flags *pqf;
	s32 val;

	lockdep_assert_held(&dev->power.lock);

	if (IS_ERR_OR_NULL(qos))
		return PM_QOS_FLAGS_UNDEFINED;

	pqf = &qos->flags;
	if (list_empty(&pqf->list))
		return PM_QOS_FLAGS_UNDEFINED;

	val = pqf->effective_flags & mask;
	if (val)
		return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;

	return PM_QOS_FLAGS_NONE;
}

/**
 * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 */
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
{
	unsigned long irqflags;
	enum pm_qos_flags_status ret;

	spin_lock_irqsave(&dev->power.lock, irqflags);
	ret = __dev_pm_qos_flags(dev, mask);
	spin_unlock_irqrestore(&dev->power.lock, irqflags);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
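
/*
 * Illustrative caller sketch (hypothetical code, not part of this file):
 * a bus type or platform layer may consult the aggregated flags before
 * cutting power to a device, honouring PM_QOS_FLAG_NO_POWER_OFF requests:
 *
 *	if (dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF) > PM_QOS_FLAGS_NONE)
 *		return -EBUSY;
 *
 * PM_QOS_FLAGS_SOME and PM_QOS_FLAGS_ALL both mean that at least one
 * request has set the flag, so the "greater than NONE" test above keeps
 * the device powered in this sketch.
 */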

/**
 * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
 * @dev: Device to get the PM QoS constraint value for.
 *
 * This routine must be called with dev->power.lock held.
 */
s32 __dev_pm_qos_read_value(struct device *dev)
{
	lockdep_assert_held(&dev->power.lock);

	return dev_pm_qos_raw_read_value(dev);
}

/**
 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
 * @dev: Device to get the PM QoS constraint value for.
 */
s32 dev_pm_qos_read_value(struct device *dev)
{
	unsigned long flags;
	s32 ret;

	spin_lock_irqsave(&dev->power.lock, flags);
	ret = __dev_pm_qos_read_value(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return ret;
}

/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply.
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and, if needed, call the per-device callbacks.
 */
static int apply_constraint(struct dev_pm_qos_request *req,
			    enum pm_qos_req_action action, s32 value)
{
	struct dev_pm_qos *qos = req->dev->power.qos;
	int ret;

	switch(req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = pm_qos_update_target(&qos->resume_latency,
					   &req->data.pnode, action, value);
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		ret = pm_qos_update_target(&qos->latency_tolerance,
					   &req->data.pnode, action, value);
		if (ret) {
			value = pm_qos_read_value(&qos->latency_tolerance);
			req->dev->power.set_latency_tolerance(req->dev, value);
		}
		break;
	case DEV_PM_QOS_FLAGS:
		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
					  action, value);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * dev_pm_qos_constraints_allocate
 * @dev: device to allocate data for
 *
 * Called at the first call to add_request, for constraint data allocation.
 * Must be called with the dev_pm_qos_mtx mutex held.
 */
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct pm_qos_constraints *c;
	struct blocking_notifier_head *n;

	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
	if (!qos)
		return -ENOMEM;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n) {
		kfree(qos);
		return -ENOMEM;
	}
	BLOCKING_INIT_NOTIFIER_HEAD(n);

	c = &qos->resume_latency;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->type = PM_QOS_MIN;
	c->notifiers = n;

	c = &qos->latency_tolerance;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;

	INIT_LIST_HEAD(&qos->flags.list);

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = qos;
	spin_unlock_irq(&dev->power.lock);

	return 0;
}

static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);

/**
 * dev_pm_qos_constraints_destroy
 * @dev: target device
 *
 * Called from the device PM core on device removal.  Drops all remaining
 * requests, frees the constraints data and marks dev->power.qos as gone.
 */
void dev_pm_qos_constraints_destroy(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct dev_pm_qos_request *req, *tmp;
	struct pm_qos_constraints *c;
	struct pm_qos_flags *f;

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	/*
	 * If the device's PM QoS resume latency limit or PM QoS flags have
	 * been exposed to user space, they have to be hidden at this point.
	 */
	pm_qos_sysfs_remove_resume_latency(dev);
	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);

	__dev_pm_qos_hide_latency_limit(dev);
	__dev_pm_qos_hide_flags(dev);

	qos = dev->power.qos;
	if (!qos)
		goto out;

	/* Flush the constraints lists for the device. */
	c = &qos->resume_latency;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		/*
		 * Update the constraints list and call the notification
		 * callbacks if needed.
		 */
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}
	c = &qos->latency_tolerance;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}
	f = &qos->flags;
	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = ERR_PTR(-ENODEV);
	spin_unlock_irq(&dev->power.lock);

	kfree(qos->resume_latency.notifiers);
	kfree(qos);

 out:
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}

static bool dev_pm_qos_invalid_req_type(struct device *dev,
					enum dev_pm_qos_req_type type)
{
	return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
	       !dev->power.set_latency_tolerance;
}

static int __dev_pm_qos_add_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	int ret = 0;

	if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
		return -EINVAL;

	if (WARN(dev_pm_qos_request_active(req),
		 "%s() called for already added request\n", __func__))
		return -EINVAL;

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
	if (!ret) {
		req->dev = dev;
		req->type = type;
		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
	}
	return ret;
}

/**
 * dev_pm_qos_add_request - inserts new qos request into the list
 * @dev: target device for the constraint
 * @req: pointer to a preallocated handle
 * @type: type of the request
 * @value: defines the qos request
 *
 * This function inserts a new entry in the device constraints list of
 * requested qos performance characteristics.  It recomputes the aggregate
 * QoS expectations of parameters and initializes the dev_pm_qos_request
 * handle.  The caller needs to save this handle for later use in updates
 * and removal.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough
 * memory for the allocation, -ENODEV when the device has just been removed
 * from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_add_request(dev, req, type, value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
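
/*
 * Illustrative driver-side sketch (hypothetical code, not part of this
 * file) of the request life cycle implemented above: allocate a handle,
 * add it, update it as conditions change and remove it on teardown.
 *
 *	static struct dev_pm_qos_request my_req;
 *
 *	ret = dev_pm_qos_add_request(dev, &my_req,
 *				     DEV_PM_QOS_RESUME_LATENCY, 100);
 *	...
 *	ret = dev_pm_qos_update_request(&my_req, 20);
 *	...
 *	ret = dev_pm_qos_remove_request(&my_req);
 *
 * The handle must stay allocated for as long as the request is active;
 * the values above are arbitrary example constraints.
 */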

/**
 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
 * @req: PM QoS request to modify.
 * @new_value: New value to request.
 */
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
				       s32 new_value)
{
	s32 curr_value;
	int ret = 0;

	if (!req)
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	switch(req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		curr_value = req->data.pnode.prio;
		break;
	case DEV_PM_QOS_FLAGS:
		curr_value = req->data.flr.flags;
		break;
	default:
		return -EINVAL;
	}

	trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
					new_value);
	if (curr_value != new_value)
		ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);

	return ret;
}

/**
 * dev_pm_qos_update_request - modifies an existing qos request
 * @req: handle to the list element holding a dev_pm_qos request to use
 * @new_value: defines the qos request
 *
 * Updates an existing dev PM qos request along with updating the
 * target value.
 *
 * Attempts are made to make this code callable on hot code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_update_request(req, new_value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);

static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	if (!req)
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
					PM_QOS_DEFAULT_VALUE);
	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
	memset(req, 0, sizeof(*req));
	return ret;
}

/**
 * dev_pm_qos_remove_request - removes an existing qos request
 * @req: handle to the request list element
 *
 * Will remove the pm qos request from the list of constraints and
 * recompute the current target value.  Call this in the device driver to
 * clean up the request list at module unload time.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_remove_request(req);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);

/**
 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block managed by the caller.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for the device.
 *
 * If the device's constraints object doesn't exist when this routine is
 * called, it will be created (or an error code will be returned if that
 * fails).
 */
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	if (!ret)
		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
						       notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
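
/*
 * Illustrative notifier sketch (hypothetical code, not part of this file).
 * The chain registered above is the resume latency chain, so the callback
 * runs whenever the aggregate resume latency constraint changes and gets
 * the new aggregate value as its "action" argument:
 *
 *	static int my_qos_notify(struct notifier_block *nb,
 *				 unsigned long value, void *data)
 *	{
 *		pr_info("new resume latency constraint: %lu\n", value);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_qos_nb = {
 *		.notifier_call = my_qos_notify,
 *	};
 *
 *	ret = dev_pm_qos_add_notifier(dev, &my_qos_nb);
 */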

/**
 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block to be removed.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value.
 */
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier)
{
	int retval = 0;

	mutex_lock(&dev_pm_qos_mtx);

	/* Silently return if the constraints object is not present. */
	if (!IS_ERR_OR_NULL(dev->power.qos))
		retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
							    notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return retval;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);

/**
 * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
 * @dev: Device whose ancestor to add the request for.
 * @req: Pointer to the preallocated handle.
 * @type: Type of the request.
 * @value: Constraint latency value.
 */
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	struct device *ancestor = dev->parent;
	int ret = -ENODEV;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		while (ancestor && !ancestor->power.ignore_children)
			ancestor = ancestor->parent;

		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		while (ancestor && !ancestor->power.set_latency_tolerance)
			ancestor = ancestor->parent;

		break;
	default:
		ancestor = NULL;
	}
	if (ancestor)
		ret = dev_pm_qos_add_request(ancestor, req, type, value);

	if (ret < 0)
		req->dev = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);

static void __dev_pm_qos_drop_user_request(struct device *dev,
					   enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos_request *req = NULL;

	switch(type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		req = dev->power.qos->resume_latency_req;
		dev->power.qos->resume_latency_req = NULL;
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		req = dev->power.qos->latency_tolerance_req;
		dev->power.qos->latency_tolerance_req = NULL;
		break;
	case DEV_PM_QOS_FLAGS:
		req = dev->power.qos->flags_req;
		dev->power.qos->flags_req = NULL;
		break;
	}
	__dev_pm_qos_remove_request(req);
	kfree(req);
}

static void dev_pm_qos_drop_user_request(struct device *dev,
					 enum dev_pm_qos_req_type type)
{
	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_drop_user_request(dev, type);
	mutex_unlock(&dev_pm_qos_mtx);
}

/**
 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
 * @value: Initial value of the latency limit.
 */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev) || value < 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->resume_latency_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->resume_latency_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_resume_latency(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
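
/*
 * Illustrative caller sketch (hypothetical, not part of this file): a
 * driver that wants user space to be able to tune the device's resume
 * latency constraint creates the sysfs attribute at probe time and tears
 * it down on remove:
 *
 *	ret = dev_pm_qos_expose_latency_limit(dev, 100);
 *	...
 *	dev_pm_qos_hide_latency_limit(dev);
 *
 * The initial value (100 here) is an arbitrary example; it stays in
 * effect as the user space request until it is changed through sysfs.
 */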

static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
}

/**
 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
 */
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_resume_latency(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_latency_limit(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);

/**
 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
 * @dev: Device whose PM QoS flags are to be exposed to user space.
 * @val: Initial values of the flags.
 */
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev))
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->flags_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->flags_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_flags(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);

static void __dev_pm_qos_hide_flags(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
}

/**
 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
 * @dev: Device whose PM QoS flags are to be hidden from user space.
 */
void dev_pm_qos_hide_flags(struct device *dev)
{
	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_flags(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);

/**
 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
 * @dev: Device to update the PM QoS flags request for.
 * @mask: Flags to set/clear.
 * @set: Whether to set or clear the flags (true means set).
 */
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
{
	s32 value;
	int ret;

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
		ret = -EINVAL;
		goto out;
	}

	value = dev_pm_qos_requested_flags(dev);
	if (set)
		value |= mask;
	else
		value &= ~mask;

	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	pm_runtime_put(dev);
	return ret;
}
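
/*
 * Illustrative caller sketch (hypothetical, not part of this file): a
 * sysfs store handler for one of the exposed flags would translate the
 * boolean written by user space into a set/clear of that flag in the
 * user space request created by dev_pm_qos_expose_flags():
 *
 *	ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, !!value);
 *
 * Only the request owned by user space is modified; other requests for
 * the same flag are left untouched and the effective flags are
 * recomputed through apply_constraint().
 */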

/**
 * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
 * @dev: Device to obtain the user space latency tolerance for.
 */
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{
	s32 ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = IS_ERR_OR_NULL(dev->power.qos)
		|| !dev->power.qos->latency_tolerance_req ?
			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
			dev->power.qos->latency_tolerance_req->data.pnode.prio;
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}

/**
 * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
 * @dev: Device to update the user space latency tolerance for.
 * @val: New user space latency tolerance for @dev (negative values disable it).
 */
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos)
	    || !dev->power.qos->latency_tolerance_req) {
		struct dev_pm_qos_request *req;

		if (val < 0) {
			if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
				ret = 0;
			else
				ret = -EINVAL;
			goto out;
		}
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out;
		}
		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
		if (ret < 0) {
			kfree(req);
			goto out;
		}
		dev->power.qos->latency_tolerance_req = req;
	} else {
		if (val < 0) {
			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
			ret = 0;
		} else {
			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
		}
	}

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);

/**
 * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to user space.
 * @dev: Device whose latency tolerance to expose.
 */
int dev_pm_qos_expose_latency_tolerance(struct device *dev)
{
	int ret;

	if (!dev->power.set_latency_tolerance)
		return -EINVAL;

	mutex_lock(&dev_pm_qos_sysfs_mtx);
	ret = pm_qos_sysfs_add_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);

/**
 * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from user space.
 * @dev: Device whose latency tolerance to hide.
 */
void dev_pm_qos_hide_latency_tolerance(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);
	pm_qos_sysfs_remove_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	/* Remove the request from user space now. */
	pm_runtime_get_sync(dev);
	dev_pm_qos_update_user_latency_tolerance(dev,
		PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);