1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37#include <linux/pm_qos.h>
38#include <linux/spinlock.h>
39#include <linux/slab.h>
40#include <linux/device.h>
41#include <linux/mutex.h>
42#include <linux/export.h>
43#include <linux/pm_runtime.h>
44#include <linux/err.h>
45
46#include "power.h"
47
/* Serializes all modifications of device PM QoS constraints and requests. */
static DEFINE_MUTEX(dev_pm_qos_mtx);
/* Serializes creation/removal of the PM QoS sysfs attribute groups. */
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);

/* Global chain called from apply_constraint() on latency target changes. */
static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
52
53
54
55
56
57
58
59
60enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
61{
62 struct dev_pm_qos *qos = dev->power.qos;
63 struct pm_qos_flags *pqf;
64 s32 val;
65
66 if (IS_ERR_OR_NULL(qos))
67 return PM_QOS_FLAGS_UNDEFINED;
68
69 pqf = &qos->flags;
70 if (list_empty(&pqf->list))
71 return PM_QOS_FLAGS_UNDEFINED;
72
73 val = pqf->effective_flags & mask;
74 if (val)
75 return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;
76
77 return PM_QOS_FLAGS_NONE;
78}
79
80
81
82
83
84
85enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
86{
87 unsigned long irqflags;
88 enum pm_qos_flags_status ret;
89
90 spin_lock_irqsave(&dev->power.lock, irqflags);
91 ret = __dev_pm_qos_flags(dev, mask);
92 spin_unlock_irqrestore(&dev->power.lock, irqflags);
93
94 return ret;
95}
96EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
97
98
99
100
101
102
103
104s32 __dev_pm_qos_read_value(struct device *dev)
105{
106 return IS_ERR_OR_NULL(dev->power.qos) ?
107 0 : pm_qos_read_value(&dev->power.qos->latency);
108}
109
110
111
112
113
114s32 dev_pm_qos_read_value(struct device *dev)
115{
116 unsigned long flags;
117 s32 ret;
118
119 spin_lock_irqsave(&dev->power.lock, flags);
120 ret = __dev_pm_qos_read_value(dev);
121 spin_unlock_irqrestore(&dev->power.lock, flags);
122
123 return ret;
124}
125
126
127
128
129
130
131
132
133
134
135
136static int apply_constraint(struct dev_pm_qos_request *req,
137 enum pm_qos_req_action action, s32 value)
138{
139 struct dev_pm_qos *qos = req->dev->power.qos;
140 int ret;
141
142 switch(req->type) {
143 case DEV_PM_QOS_LATENCY:
144 ret = pm_qos_update_target(&qos->latency, &req->data.pnode,
145 action, value);
146 if (ret) {
147 value = pm_qos_read_value(&qos->latency);
148 blocking_notifier_call_chain(&dev_pm_notifiers,
149 (unsigned long)value,
150 req);
151 }
152 break;
153 case DEV_PM_QOS_FLAGS:
154 ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
155 action, value);
156 break;
157 default:
158 ret = -EINVAL;
159 }
160
161 return ret;
162}
163
164
165
166
167
168
169
170
171static int dev_pm_qos_constraints_allocate(struct device *dev)
172{
173 struct dev_pm_qos *qos;
174 struct pm_qos_constraints *c;
175 struct blocking_notifier_head *n;
176
177 qos = kzalloc(sizeof(*qos), GFP_KERNEL);
178 if (!qos)
179 return -ENOMEM;
180
181 n = kzalloc(sizeof(*n), GFP_KERNEL);
182 if (!n) {
183 kfree(qos);
184 return -ENOMEM;
185 }
186 BLOCKING_INIT_NOTIFIER_HEAD(n);
187
188 c = &qos->latency;
189 plist_head_init(&c->list);
190 c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
191 c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
192 c->type = PM_QOS_MIN;
193 c->notifiers = n;
194
195 INIT_LIST_HEAD(&qos->flags.list);
196
197 spin_lock_irq(&dev->power.lock);
198 dev->power.qos = qos;
199 spin_unlock_irq(&dev->power.lock);
200
201 return 0;
202}
203
/* Forward declarations used by dev_pm_qos_constraints_destroy() below. */
static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);
206
207
208
209
210
211
212
/**
 * dev_pm_qos_constraints_destroy - Tear down a device's PM QoS data.
 * @dev: Device whose constraints are to be released.
 *
 * Removes the sysfs attributes and user-space requests, drops every
 * remaining latency and flags request, frees the constraints object and
 * leaves ERR_PTR(-ENODEV) in dev->power.qos so that later request/notifier
 * calls fail instead of touching freed memory.
 *
 * Lock ordering: dev_pm_qos_sysfs_mtx is taken before dev_pm_qos_mtx so
 * that the sysfs files can be removed without holding the constraints
 * mutex (sysfs removal may block on attribute readers that themselves
 * take dev_pm_qos_mtx).
 */
void dev_pm_qos_constraints_destroy(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct dev_pm_qos_request *req, *tmp;
	struct pm_qos_constraints *c;
	struct pm_qos_flags *f;

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	/* Remove the sysfs interface before dropping the requests backing it. */
	pm_qos_sysfs_remove_latency(dev);
	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);

	__dev_pm_qos_hide_latency_limit(dev);
	__dev_pm_qos_hide_flags(dev);

	qos = dev->power.qos;
	if (!qos)
		goto out;

	/* Flush the remaining latency requests. */
	c = &qos->latency;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		/*
		 * Zero the struct so that dev_pm_qos_request_active() on a
		 * stale request reports it as inactive.
		 */
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}
	/* Flush the remaining flags requests. */
	f = &qos->flags;
	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	/* Poison the pointer: subsequent callers get -ENODEV. */
	spin_lock_irq(&dev->power.lock);
	dev->power.qos = ERR_PTR(-ENODEV);
	spin_unlock_irq(&dev->power.lock);

	kfree(c->notifiers);
	kfree(qos);

 out:
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
290 enum dev_pm_qos_req_type type, s32 value)
291{
292 int ret = 0;
293
294 if (!dev || !req)
295 return -EINVAL;
296
297 if (WARN(dev_pm_qos_request_active(req),
298 "%s() called for already added request\n", __func__))
299 return -EINVAL;
300
301 mutex_lock(&dev_pm_qos_mtx);
302
303 if (IS_ERR(dev->power.qos))
304 ret = -ENODEV;
305 else if (!dev->power.qos)
306 ret = dev_pm_qos_constraints_allocate(dev);
307
308 if (!ret) {
309 req->dev = dev;
310 req->type = type;
311 ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
312 }
313
314 mutex_unlock(&dev_pm_qos_mtx);
315
316 return ret;
317}
318EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
319
320
321
322
323
324
325static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
326 s32 new_value)
327{
328 s32 curr_value;
329 int ret = 0;
330
331 if (!req)
332 return -EINVAL;
333
334 if (WARN(!dev_pm_qos_request_active(req),
335 "%s() called for unknown object\n", __func__))
336 return -EINVAL;
337
338 if (IS_ERR_OR_NULL(req->dev->power.qos))
339 return -ENODEV;
340
341 switch(req->type) {
342 case DEV_PM_QOS_LATENCY:
343 curr_value = req->data.pnode.prio;
344 break;
345 case DEV_PM_QOS_FLAGS:
346 curr_value = req->data.flr.flags;
347 break;
348 default:
349 return -EINVAL;
350 }
351
352 if (curr_value != new_value)
353 ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);
354
355 return ret;
356}
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
377{
378 int ret;
379
380 mutex_lock(&dev_pm_qos_mtx);
381 ret = __dev_pm_qos_update_request(req, new_value);
382 mutex_unlock(&dev_pm_qos_mtx);
383 return ret;
384}
385EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
386
387static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
388{
389 int ret;
390
391 if (!req)
392 return -EINVAL;
393
394 if (WARN(!dev_pm_qos_request_active(req),
395 "%s() called for unknown object\n", __func__))
396 return -EINVAL;
397
398 if (IS_ERR_OR_NULL(req->dev->power.qos))
399 return -ENODEV;
400
401 ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
402 memset(req, 0, sizeof(*req));
403 return ret;
404}
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
422{
423 int ret;
424
425 mutex_lock(&dev_pm_qos_mtx);
426 ret = __dev_pm_qos_remove_request(req);
427 mutex_unlock(&dev_pm_qos_mtx);
428 return ret;
429}
430EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
446{
447 int ret = 0;
448
449 mutex_lock(&dev_pm_qos_mtx);
450
451 if (IS_ERR(dev->power.qos))
452 ret = -ENODEV;
453 else if (!dev->power.qos)
454 ret = dev_pm_qos_constraints_allocate(dev);
455
456 if (!ret)
457 ret = blocking_notifier_chain_register(
458 dev->power.qos->latency.notifiers, notifier);
459
460 mutex_unlock(&dev_pm_qos_mtx);
461 return ret;
462}
463EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
464
465
466
467
468
469
470
471
472
473
474
475int dev_pm_qos_remove_notifier(struct device *dev,
476 struct notifier_block *notifier)
477{
478 int retval = 0;
479
480 mutex_lock(&dev_pm_qos_mtx);
481
482
483 if (!IS_ERR_OR_NULL(dev->power.qos))
484 retval = blocking_notifier_chain_unregister(
485 dev->power.qos->latency.notifiers,
486 notifier);
487
488 mutex_unlock(&dev_pm_qos_mtx);
489 return retval;
490}
491EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
492
493
494
495
496
497
498
499
500
501
502int dev_pm_qos_add_global_notifier(struct notifier_block *notifier)
503{
504 return blocking_notifier_chain_register(&dev_pm_notifiers, notifier);
505}
506EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier);
507
508
509
510
511
512
513
514
515
516
517int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
518{
519 return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
520}
521EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
522
523
524
525
526
527
528
529int dev_pm_qos_add_ancestor_request(struct device *dev,
530 struct dev_pm_qos_request *req, s32 value)
531{
532 struct device *ancestor = dev->parent;
533 int ret = -ENODEV;
534
535 while (ancestor && !ancestor->power.ignore_children)
536 ancestor = ancestor->parent;
537
538 if (ancestor)
539 ret = dev_pm_qos_add_request(ancestor, req,
540 DEV_PM_QOS_LATENCY, value);
541
542 if (ret < 0)
543 req->dev = NULL;
544
545 return ret;
546}
547EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
548
549#ifdef CONFIG_PM_RUNTIME
550static void __dev_pm_qos_drop_user_request(struct device *dev,
551 enum dev_pm_qos_req_type type)
552{
553 struct dev_pm_qos_request *req = NULL;
554
555 switch(type) {
556 case DEV_PM_QOS_LATENCY:
557 req = dev->power.qos->latency_req;
558 dev->power.qos->latency_req = NULL;
559 break;
560 case DEV_PM_QOS_FLAGS:
561 req = dev->power.qos->flags_req;
562 dev->power.qos->flags_req = NULL;
563 break;
564 }
565 __dev_pm_qos_remove_request(req);
566 kfree(req);
567}
568
569static void dev_pm_qos_drop_user_request(struct device *dev,
570 enum dev_pm_qos_req_type type)
571{
572 mutex_lock(&dev_pm_qos_mtx);
573 __dev_pm_qos_drop_user_request(dev, type);
574 mutex_unlock(&dev_pm_qos_mtx);
575}
576
577
578
579
580
581
/**
 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
 * @dev: Device whose PM QoS latency limit is to be exposed.
 * @value: Initial value of the latency limit.
 *
 * Adds a kernel-owned latency request on behalf of user space and creates
 * the corresponding sysfs attribute.  Returns -EEXIST if the limit has
 * already been exposed for @dev, -ENODEV if the device's constraints went
 * away between the request addition and publication.
 *
 * Note the lock ordering: dev_pm_qos_sysfs_mtx is taken outside
 * dev_pm_qos_mtx, and the sysfs attribute is created after dropping
 * dev_pm_qos_mtx (sysfs callbacks take it themselves).
 */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev) || value < 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	/* ret is still 0 here from the successful add above. */
	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->latency_req)
		ret = -EEXIST;

	if (ret < 0) {
		/* Roll back the request added above (no-op on -ENODEV). */
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->latency_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_latency(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
628
629static void __dev_pm_qos_hide_latency_limit(struct device *dev)
630{
631 if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req)
632 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
633}
634
635
636
637
638
/**
 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
 *
 * Removes the sysfs attribute first (outside dev_pm_qos_mtx, since its
 * callbacks take that mutex), then drops the backing request.
 */
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_latency(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_latency_limit(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
652
653
654
655
656
657
/**
 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
 * @dev: Device whose PM QoS flags are to be exposed to user space.
 * @val: Initial values of the flags.
 *
 * Adds a kernel-owned flags request on behalf of user space and creates
 * the corresponding sysfs attributes.  The device is kept runtime-resumed
 * for the duration (flags changes may need the device powered).  Returns
 * -EEXIST if the flags have already been exposed for @dev.
 *
 * Lock ordering mirrors dev_pm_qos_expose_latency_limit():
 * dev_pm_qos_sysfs_mtx outside dev_pm_qos_mtx, sysfs work done with
 * dev_pm_qos_mtx released.
 */
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev))
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	/* ret is still 0 here from the successful add above. */
	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->flags_req)
		ret = -EEXIST;

	if (ret < 0) {
		/* Roll back the request added above (no-op on -ENODEV). */
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->flags_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_flags(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
706
707static void __dev_pm_qos_hide_flags(struct device *dev)
708{
709 if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
710 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
711}
712
713
714
715
716
/**
 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
 * @dev: Device whose PM QoS flags are to be hidden from user space.
 *
 * Removes the sysfs attributes first (outside dev_pm_qos_mtx, since their
 * callbacks take that mutex), then drops the backing request, holding a
 * runtime-PM reference throughout.
 */
void dev_pm_qos_hide_flags(struct device *dev)
{
	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_flags(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
732
733
734
735
736
737
738
739int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
740{
741 s32 value;
742 int ret;
743
744 pm_runtime_get_sync(dev);
745 mutex_lock(&dev_pm_qos_mtx);
746
747 if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
748 ret = -EINVAL;
749 goto out;
750 }
751
752 value = dev_pm_qos_requested_flags(dev);
753 if (set)
754 value |= mask;
755 else
756 value &= ~mask;
757
758 ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
759
760 out:
761 mutex_unlock(&dev_pm_qos_mtx);
762 pm_runtime_put(dev);
763 return ret;
764}
#else /* !CONFIG_PM_RUNTIME */
/* Without runtime PM the user-space request interface does not exist,
 * so the teardown hooks used by dev_pm_qos_constraints_destroy() are stubs. */
static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
static void __dev_pm_qos_hide_flags(struct device *dev) {}
#endif
769