/*
 * Devices PM QoS constraints management
 *
 * This module exposes the interface to kernel space for specifying
 * per-device PM QoS dependencies. It provides infrastructure for
 * registration of:
 *
 * Dependents on a QoS value : register requests
 * Watchers of QoS value : get notified when target QoS value changes
 *
 * This QoS design is best effort based. Dependents register their QoS
 * needs. Watchers register to keep track of the current QoS needs of
 * the system. Watchers can register a per-device notification callback
 * using the dev_pm_qos_*_notifier API, or a global notification
 * callback using the dev_pm_qos_*_global_notifier API.
 *
 * Notes about the per-device constraint data struct allocation:
 * . The per-device constraints data struct ptr is stored into the device
 *    dev_pm_info.
 * . To minimize the data usage by the per-device constraints, the data
 *    struct is only allocated at the first call to dev_pm_qos_add_request.
 * . The data is later freed when the device is removed from the system.
 * . A global mutex protects the constraints users from the data being
 *    allocated and freed.
 */
#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <trace/events/power.h>

#include "power.h"

static DEFINE_MUTEX(dev_pm_qos_mtx);
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);

static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);

/**
 * __dev_pm_qos_flags - Check PM QoS flags for a given device.
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 *
 * This routine must be called with dev->power.lock held.
 */
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
{
	struct dev_pm_qos *qos = dev->power.qos;
	struct pm_qos_flags *pqf;
	s32 val;

	if (IS_ERR_OR_NULL(qos))
		return PM_QOS_FLAGS_UNDEFINED;

	pqf = &qos->flags;
	if (list_empty(&pqf->list))
		return PM_QOS_FLAGS_UNDEFINED;

	val = pqf->effective_flags & mask;
	if (val)
		return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;

	return PM_QOS_FLAGS_NONE;
}

/**
 * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 */
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
{
	unsigned long irqflags;
	enum pm_qos_flags_status ret;

	spin_lock_irqsave(&dev->power.lock, irqflags);
	ret = __dev_pm_qos_flags(dev, mask);
	spin_unlock_irqrestore(&dev->power.lock, irqflags);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);

/**
 * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
 * @dev: Device to get the PM QoS constraint value for.
 *
 * This routine must be called with dev->power.lock held.
 */
s32 __dev_pm_qos_read_value(struct device *dev)
{
	return IS_ERR_OR_NULL(dev->power.qos) ?
		0 : pm_qos_read_value(&dev->power.qos->latency);
}

/**
 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
 * @dev: Device to get the PM QoS constraint value for.
 */
s32 dev_pm_qos_read_value(struct device *dev)
{
	unsigned long flags;
	s32 ret;

	spin_lock_irqsave(&dev->power.lock, flags);
	ret = __dev_pm_qos_read_value(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return ret;
}

/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and if needed call the per-device and the global notification
 * callbacks.
 */
static int apply_constraint(struct dev_pm_qos_request *req,
			    enum pm_qos_req_action action, s32 value)
{
	struct dev_pm_qos *qos = req->dev->power.qos;
	int ret;

	switch(req->type) {
	case DEV_PM_QOS_LATENCY:
		ret = pm_qos_update_target(&qos->latency, &req->data.pnode,
					   action, value);
		if (ret) {
			value = pm_qos_read_value(&qos->latency);
			blocking_notifier_call_chain(&dev_pm_notifiers,
						     (unsigned long)value,
						     req);
		}
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		ret = pm_qos_update_target(&qos->latency_tolerance,
					   &req->data.pnode, action, value);
		if (ret) {
			value = pm_qos_read_value(&qos->latency_tolerance);
			req->dev->device_rh->power.set_latency_tolerance(
				req->dev, value);
		}
		break;
	case DEV_PM_QOS_FLAGS:
		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
					  action, value);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * dev_pm_qos_constraints_allocate
 * @dev: device to allocate data for
 *
 * Called at the first call to add_request, for constraint data allocation.
 * Must be called with the dev_pm_qos_mtx mutex held.
 */
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct pm_qos_constraints *c;
	struct blocking_notifier_head *n;

	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
	if (!qos)
		return -ENOMEM;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n) {
		kfree(qos);
		return -ENOMEM;
	}
	BLOCKING_INIT_NOTIFIER_HEAD(n);

	c = &qos->latency;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
	c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
	c->type = PM_QOS_MIN;
	c->notifiers = n;

	c = &qos->latency_tolerance;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->type = PM_QOS_MIN;

	INIT_LIST_HEAD(&qos->flags.list);

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = qos;
	spin_unlock_irq(&dev->power.lock);

	return 0;
}

static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);

/**
 * dev_pm_qos_constraints_destroy
 * @dev: target device
 *
 * Called from the core PM subsystem on device removal.
 */
void dev_pm_qos_constraints_destroy(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct dev_pm_qos_request *req, *tmp;
	struct pm_qos_constraints *c;
	struct pm_qos_flags *f;

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	/*
	 * If the device's PM QoS latency limit or PM QoS flags have been
	 * exposed to user space, they have to be hidden at this point.
	 */
	pm_qos_sysfs_remove_latency(dev);
	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);

	__dev_pm_qos_hide_latency_limit(dev);
	__dev_pm_qos_hide_flags(dev);

	qos = dev->power.qos;
	if (!qos)
		goto out;

	/* Flush the constraints lists for the device. */
	c = &qos->latency;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		/*
		 * Update constraints list and call the notification
		 * callbacks if needed
		 */
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}
	c = &qos->latency_tolerance;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}
	f = &qos->flags;
	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = ERR_PTR(-ENODEV);
	spin_unlock_irq(&dev->power.lock);

	kfree(qos->latency.notifiers);
	kfree(qos);

 out:
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}

static bool dev_pm_qos_invalid_request(struct device *dev,
				       struct dev_pm_qos_request *req)
{
	return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE
			&& !dev->device_rh->power.set_latency_tolerance);
}

static int __dev_pm_qos_add_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	int ret = 0;

	if (!dev || dev_pm_qos_invalid_request(dev, req))
		return -EINVAL;

	if (WARN(dev_pm_qos_request_active(req),
		 "%s() called for already added request\n", __func__))
		return -EINVAL;

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
	if (!ret) {
		req->dev = dev;
		req->type = type;
		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
	}
	return ret;
}

/**
 * dev_pm_qos_add_request - inserts new qos request into the list
 * @dev: target device for the constraint
 * @req: pointer to a preallocated handle
 * @type: type of the request
 * @value: defines the qos request
 *
 * This function inserts a new entry in the device constraints list of
 * requested qos performance characteristics. It recomputes the aggregate
 * QoS expectations of parameters and initializes the dev_pm_qos_request
 * handle.  Caller needs to save this handle for later use in updates and
 * removal.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value is the same as the previous value.
 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
 * to allocate for data structures, -ENODEV if the device has just been
 * removed from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_add_request(dev, req, type, value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);

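/*
 * Illustrative sketch (not part of the original file): a driver keeping the
 * device's PM QoS latency within a bound while it streams data. The request
 * struct must stay allocated for as long as the request is active; the
 * function names and the 250 usec bound below are hypothetical.
 *
 *	static struct dev_pm_qos_request stream_req;
 *
 *	static int stream_start(struct device *dev)
 *	{
 *		return dev_pm_qos_add_request(dev, &stream_req,
 *					      DEV_PM_QOS_LATENCY, 250);
 *	}
 *
 *	static void stream_stop(struct device *dev)
 *	{
 *		if (dev_pm_qos_request_active(&stream_req))
 *			dev_pm_qos_remove_request(&stream_req);
 *	}
 */
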
/**
 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
 * @req : PM QoS request to modify.
 * @new_value: New value to request.
 */
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
				       s32 new_value)
{
	s32 curr_value;
	int ret = 0;

	if (!req)
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	switch(req->type) {
	case DEV_PM_QOS_LATENCY:
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		curr_value = req->data.pnode.prio;
		break;
	case DEV_PM_QOS_FLAGS:
		curr_value = req->data.flr.flags;
		break;
	default:
		return -EINVAL;
	}

	trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
					new_value);
	if (curr_value != new_value)
		ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);

	return ret;
}

/**
 * dev_pm_qos_update_request - modifies an existing qos request
 * @req : handle to list element holding a dev_pm_qos request to use
 * @new_value: defines the qos request
 *
 * Updates an existing dev PM qos request along with updating the
 * target value.
 *
 * Attempts are made to make this code callable on hot code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value is the same as the previous value.
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_update_request(req, new_value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);

static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	if (!req)
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
					PM_QOS_DEFAULT_VALUE);
	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
	memset(req, 0, sizeof(*req));
	return ret;
}

/**
 * dev_pm_qos_remove_request - removes an existing qos request
 * @req: handle to request list element
 *
 * Will remove the element.  After removal it is up to the caller to
 * free the memory if the handle was allocated with kmalloc().
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value is the same as the previous value,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_remove_request(req);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);

/**
 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block managed by caller.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for the device.
 *
 * If the device's constraints object doesn't exist when this routine is
 * called, it will be created (or error code will be returned if that fails).
 */
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	if (!ret)
		ret = blocking_notifier_chain_register(
				dev->power.qos->latency.notifiers, notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);

/**
 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block to be removed.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value.
 */
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier)
{
	int retval = 0;

	mutex_lock(&dev_pm_qos_mtx);

	/* Silently return if the constraints object is not present. */
	if (!IS_ERR_OR_NULL(dev->power.qos))
		retval = blocking_notifier_chain_unregister(
				dev->power.qos->latency.notifiers,
				notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return retval;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);

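/*
 * Illustrative sketch (not part of the original file): registering a
 * per-device notifier so a subsystem learns when the aggregated latency
 * constraint changes. The callback below is hypothetical and only logs
 * the new target value.
 *
 *	static int lat_notify(struct notifier_block *nb, unsigned long value,
 *			      void *data)
 *	{
 *		pr_debug("device PM QoS latency target is now %lu\n", value);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block lat_nb = { .notifier_call = lat_notify };
 *
 *	ret = dev_pm_qos_add_notifier(dev, &lat_nb);
 *	...
 *	dev_pm_qos_remove_notifier(dev, &lat_nb);
 */
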
/**
 * dev_pm_qos_add_global_notifier - sets notification entry for changes to
 * target value of the PM QoS constraints for any device
 *
 * @notifier: notifier block managed by caller.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for any device.
 */
int dev_pm_qos_add_global_notifier(struct notifier_block *notifier)
{
	return blocking_notifier_chain_register(&dev_pm_notifiers, notifier);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier);

/**
 * dev_pm_qos_remove_global_notifier - deletes notification for changes to
 * target value of PM QoS constraints for any device
 *
 * @notifier: notifier block to be removed.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value for any device.
 */
int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
{
	return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);

/**
 * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
 * @dev: Device whose ancestor to add the request for.
 * @req: Pointer to the preallocated handle.
 * @value: Constraint latency value.
 */
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req, s32 value)
{
	struct device *ancestor = dev->parent;
	int ret = -ENODEV;

	while (ancestor && !ancestor->power.ignore_children)
		ancestor = ancestor->parent;

	if (ancestor)
		ret = dev_pm_qos_add_request(ancestor, req,
					     DEV_PM_QOS_LATENCY, value);

	if (ret < 0)
		req->dev = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);

#ifdef CONFIG_PM_RUNTIME
static void __dev_pm_qos_drop_user_request(struct device *dev,
					   enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos_request *req = NULL;

	switch(type) {
	case DEV_PM_QOS_LATENCY:
		req = dev->power.qos->latency_req;
		dev->power.qos->latency_req = NULL;
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		req = dev->power.qos->latency_tolerance_req;
		dev->power.qos->latency_tolerance_req = NULL;
		break;
	case DEV_PM_QOS_FLAGS:
		req = dev->power.qos->flags_req;
		dev->power.qos->flags_req = NULL;
		break;
	}
	__dev_pm_qos_remove_request(req);
	kfree(req);
}

static void dev_pm_qos_drop_user_request(struct device *dev,
					 enum dev_pm_qos_req_type type)
{
	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_drop_user_request(dev, type);
	mutex_unlock(&dev_pm_qos_mtx);
}

/**
 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
 * @value: Initial value of the latency limit.
 */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev) || value < 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->latency_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->latency_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_latency(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);

static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
}

/**
 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
 */
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_latency(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_latency_limit(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);

/**
 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
 * @dev: Device whose PM QoS flags are to be exposed to user space.
 * @val: Initial values of the flags.
 */
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev))
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->flags_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->flags_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_flags(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);

static void __dev_pm_qos_hide_flags(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
}

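/*
 * Illustrative sketch (not part of the original file): a driver exposing the
 * PM QoS flags to user space at probe time, with the "no power off" flag
 * initially requested, and hiding them again on removal. Error handling is
 * elided and the probe/remove function names are hypothetical.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		return dev_pm_qos_expose_flags(dev, PM_QOS_FLAG_NO_POWER_OFF);
 *	}
 *
 *	static void foo_remove(struct device *dev)
 *	{
 *		dev_pm_qos_hide_flags(dev);
 *	}
 */
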
/**
 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
 * @dev: Device whose PM QoS flags are to be hidden from user space.
 */
void dev_pm_qos_hide_flags(struct device *dev)
{
	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_flags(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);

/**
 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
 * @dev: Device to update the PM QoS flags request for.
 * @mask: Flags to set/clear.
 * @set: Whether to set or clear the flags (true means set).
 */
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
{
	s32 value;
	int ret;

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
		ret = -EINVAL;
		goto out;
	}

	value = dev_pm_qos_requested_flags(dev);
	if (set)
		value |= mask;
	else
		value &= ~mask;

	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	pm_runtime_put(dev);
	return ret;
}

/**
 * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
 * @dev: Device to obtain the user space latency tolerance for.
 */
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{
	s32 ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = IS_ERR_OR_NULL(dev->power.qos)
		|| !dev->power.qos->latency_tolerance_req ?
			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
			dev->power.qos->latency_tolerance_req->data.pnode.prio;
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}

/**
 * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
 * @dev: Device to update the user space latency tolerance for.
 * @val: New user space latency tolerance for @dev (negative values disable).
 */
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos)
	    || !dev->power.qos->latency_tolerance_req) {
		struct dev_pm_qos_request *req;

		if (val < 0) {
			if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
				ret = 0;
			else
				ret = -EINVAL;
			goto out;
		}
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out;
		}
		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
		if (ret < 0) {
			kfree(req);
			goto out;
		}
		dev->power.qos->latency_tolerance_req = req;
	} else {
		if (val < 0) {
			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
			ret = 0;
		} else {
			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
		}
	}

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);

/**
 * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
 * @dev: Device whose latency tolerance to expose
 */
int dev_pm_qos_expose_latency_tolerance(struct device *dev)
{
	int ret;

	if (!dev->device_rh->power.set_latency_tolerance)
		return -EINVAL;

	mutex_lock(&dev_pm_qos_sysfs_mtx);
	ret = pm_qos_sysfs_add_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);

/**
 * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
 * @dev: Device whose latency tolerance to hide
 */
void dev_pm_qos_hide_latency_tolerance(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);
	pm_qos_sysfs_remove_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	/* Remove the request from user space now */
	pm_runtime_get_sync(dev);
	dev_pm_qos_update_user_latency_tolerance(dev,
		PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);
#else /* !CONFIG_PM_RUNTIME */
static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
static void __dev_pm_qos_hide_flags(struct device *dev) {}
#endif /* CONFIG_PM_RUNTIME */