1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/module.h>
14#include <linux/thermal.h>
15#include <linux/cpufreq.h>
16#include <linux/err.h>
17#include <linux/idr.h>
18#include <linux/pm_opp.h>
19#include <linux/slab.h>
20#include <linux/cpu.h>
21#include <linux/cpu_cooling.h>
22
23#include <trace/events/thermal.h>
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
/**
 * struct freq_table - frequency table along with power entries
 * @frequency:	frequency in KHz
 * @power:	power in mW
 *
 * This structure is built when the cooling device registers and helps
 * in translating frequency to power and vice versa.
 */
struct freq_table {
	u32 frequency;
	u32 power;
};
51
52
53
54
55
56
/**
 * struct time_in_idle - idle time stats
 * @time:	previous reading of the absolute time that this cpu was idle
 * @timestamp:	wall time of the last invocation of get_cpu_idle_time()
 */
struct time_in_idle {
	u64 time;
	u64 timestamp;
};
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
/**
 * struct cpufreq_cooling_device - data for cooling device with cpufreq
 * @id: unique integer value corresponding to each cpufreq_cooling_device
 *	registered.
 * @last_load: load measured by the latest call to
 *	cpufreq_get_requested_power()
 * @cpufreq_state: integer value representing the current state of cpufreq
 *	cooling	devices.
 * @clipped_freq: integer value representing the absolute value of the clipped
 *	frequency.
 * @max_level: maximum cooling level. One less than total number of valid
 *	cpufreq frequencies.
 * @freq_table: Freq table in descending order of frequencies
 * @policy: cpufreq policy.
 * @node: list_head to link all cpufreq_cooling_device together.
 * @idle_time: idle time stats
 *
 * This structure is required for keeping information of each registered
 * cpufreq_cooling_device.
 */
struct cpufreq_cooling_device {
	int id;
	u32 last_load;
	unsigned int cpufreq_state;
	unsigned int clipped_freq;
	unsigned int max_level;
	struct freq_table *freq_table;	/* In descending order */
	struct cpufreq_policy *policy;
	struct list_head node;
	struct time_in_idle *idle_time;
};
94
/* IDA handing out the unique ids used in "thermal-cpufreq-%d" names */
static DEFINE_IDA(cpufreq_ida);
/* Protects cpufreq_cdev_list below */
static DEFINE_MUTEX(cooling_list_lock);
static LIST_HEAD(cpufreq_cdev_list);
98
99
100
101
102
103
104
105
106
107
108static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_cdev,
109 unsigned int freq)
110{
111 struct freq_table *freq_table = cpufreq_cdev->freq_table;
112 unsigned long level;
113
114 for (level = 1; level <= cpufreq_cdev->max_level; level++)
115 if (freq > freq_table[level].frequency)
116 break;
117
118 return level - 1;
119}
120
121
122
123
124
125
126
127
128
129
130
131
132
/**
 * cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
 * @nb:	struct notifier_block * with callback info.
 * @event: value showing cpufreq event for which this function invoked.
 * @data: callback-specific data
 *
 * Callback to hijack the notification on cpufreq policy transition.
 * Every time there is a change in policy, we will intercept and
 * update the cpufreq policy with thermal constraints.
 *
 * Return: NOTIFY_DONE for events other than CPUFREQ_ADJUST, NOTIFY_OK
 * otherwise.
 */
static int cpufreq_thermal_notifier(struct notifier_block *nb,
				    unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned long clipped_freq;
	struct cpufreq_cooling_device *cpufreq_cdev;

	if (event != CPUFREQ_ADJUST)
		return NOTIFY_DONE;

	mutex_lock(&cooling_list_lock);
	list_for_each_entry(cpufreq_cdev, &cpufreq_cdev_list, node) {
		/*
		 * A cooling device is matched to the policy by CPU id; only
		 * one device in the list (at most) manages this policy.
		 */
		if (policy->cpu != cpufreq_cdev->policy->cpu)
			continue;

		/*
		 * policy->max is the maximum allowed frequency defined by
		 * user and clipped_freq is the maximum that thermal
		 * constraints allow.
		 *
		 * If clipped_freq is lower than policy->max, then we need to
		 * readjust policy->max.
		 *
		 * But, if clipped_freq is greater than policy->max, we don't
		 * need to do anything.
		 */
		clipped_freq = cpufreq_cdev->clipped_freq;

		if (policy->max > clipped_freq)
			cpufreq_verify_within_limits(policy, 0, clipped_freq);
		break;
	}
	mutex_unlock(&cooling_list_lock);

	return NOTIFY_OK;
}
173
174
175
176
177
178
179
180
181
182
183
184
185
186
/**
 * update_freq_table() - Update the freq table with power numbers
 * @cpufreq_cdev:	the cpufreq cooling device in which to update the table
 * @capacitance: dynamic power coefficient for these cpus
 *
 * Update the freq table with power numbers.  This table will be used in
 * cpu_power_to_freq() and cpu_freq_to_power() to convert between power
 * and frequency efficiently.  Power is stored in mW, frequency in KHz.
 *
 * Return: 0 on success, -ENODEV if the cpu device cannot be found,
 * -EINVAL if the OPP count does not match the cpufreq table or an OPP
 * lookup fails, or a negative error from dev_pm_opp_get_opp_count().
 */
static int update_freq_table(struct cpufreq_cooling_device *cpufreq_cdev,
			     u32 capacitance)
{
	struct freq_table *freq_table = cpufreq_cdev->freq_table;
	struct dev_pm_opp *opp;
	struct device *dev = NULL;
	int num_opps = 0, cpu = cpufreq_cdev->policy->cpu, i;

	dev = get_cpu_device(cpu);
	if (unlikely(!dev)) {
		pr_warn("No cpu device for cpu %d\n", cpu);
		return -ENODEV;
	}

	num_opps = dev_pm_opp_get_opp_count(dev);
	if (num_opps < 0)
		return num_opps;

	/*
	 * The OPP table must contain exactly the same frequencies as the
	 * cpufreq table, otherwise the power values would be attached to
	 * the wrong levels.
	 */
	if (num_opps != cpufreq_cdev->max_level + 1) {
		dev_warn(dev, "Number of OPPs not matching with max_levels\n");
		return -EINVAL;
	}

	for (i = 0; i <= cpufreq_cdev->max_level; i++) {
		/* table holds KHz: convert to Hz for the OPP lookup */
		unsigned long freq = freq_table[i].frequency * 1000;
		u32 freq_mhz = freq_table[i].frequency / 1000;
		u64 power;
		u32 voltage_mv;

		/*
		 * Find ceil frequency as 'freq' may be slightly lower than
		 * OPP freq due to truncation while converting to kHz.
		 */
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp)) {
			dev_err(dev, "failed to get opp for %lu frequency\n",
				freq);
			return -EINVAL;
		}

		voltage_mv = dev_pm_opp_get_voltage(opp) / 1000;
		dev_pm_opp_put(opp);

		/*
		 * Do the multiplication with MHz and millivolt so as
		 * to not overflow.
		 */
		power = (u64)capacitance * freq_mhz * voltage_mv * voltage_mv;
		do_div(power, 1000000000);

		/* power is stored in mW */
		freq_table[i].power = power;
	}

	return 0;
}
247
248static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_cdev,
249 u32 freq)
250{
251 int i;
252 struct freq_table *freq_table = cpufreq_cdev->freq_table;
253
254 for (i = 1; i <= cpufreq_cdev->max_level; i++)
255 if (freq > freq_table[i].frequency)
256 break;
257
258 return freq_table[i - 1].power;
259}
260
261static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
262 u32 power)
263{
264 int i;
265 struct freq_table *freq_table = cpufreq_cdev->freq_table;
266
267 for (i = 1; i <= cpufreq_cdev->max_level; i++)
268 if (power > freq_table[i].power)
269 break;
270
271 return freq_table[i - 1].frequency;
272}
273
274
275
276
277
278
279
280
281
282
283static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
284 int cpu_idx)
285{
286 u32 load;
287 u64 now, now_idle, delta_time, delta_idle;
288 struct time_in_idle *idle_time = &cpufreq_cdev->idle_time[cpu_idx];
289
290 now_idle = get_cpu_idle_time(cpu, &now, 0);
291 delta_idle = now_idle - idle_time->time;
292 delta_time = now - idle_time->timestamp;
293
294 if (delta_time <= delta_idle)
295 load = 0;
296 else
297 load = div64_u64(100 * (delta_time - delta_idle), delta_time);
298
299 idle_time->time = now_idle;
300 idle_time->timestamp = now;
301
302 return load;
303}
304
305
306
307
308
309
310
311
312
313static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_cdev,
314 unsigned long freq)
315{
316 u32 raw_cpu_power;
317
318 raw_cpu_power = cpu_freq_to_power(cpufreq_cdev, freq);
319 return (raw_cpu_power * cpufreq_cdev->last_load) / 100;
320}
321
322
323
324
325
326
327
328
329
330
331
332
333
334static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
335 unsigned long *state)
336{
337 struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
338
339 *state = cpufreq_cdev->max_level;
340 return 0;
341}
342
343
344
345
346
347
348
349
350
351
352
353static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
354 unsigned long *state)
355{
356 struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
357
358 *state = cpufreq_cdev->cpufreq_state;
359
360 return 0;
361}
362
363
364
365
366
367
368
369
370
371
372
373static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
374 unsigned long state)
375{
376 struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
377 unsigned int clip_freq;
378
379
380 if (WARN_ON(state > cpufreq_cdev->max_level))
381 return -EINVAL;
382
383
384 if (cpufreq_cdev->cpufreq_state == state)
385 return 0;
386
387 clip_freq = cpufreq_cdev->freq_table[state].frequency;
388 cpufreq_cdev->cpufreq_state = state;
389 cpufreq_cdev->clipped_freq = clip_freq;
390
391 cpufreq_update_policy(cpufreq_cdev->policy->cpu);
392
393 return 0;
394}
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
/**
 * cpufreq_get_requested_power() - get the current power
 * @cdev:	&thermal_cooling_device pointer
 * @tz:		a valid thermal zone device pointer
 * @power:	pointer in which to store the resulting power
 *
 * Calculate the current power consumption of the cpus in milliwatts
 * and store it in @power.  The power is estimated as the table power
 * at the current frequency scaled by the summed load of all CPUs in
 * the policy (offline CPUs contribute zero load).
 *
 * Return: 0 on success.  Failure to allocate the per-cpu load buffer
 * only suppresses the trace event; it does not fail the request.
 */
static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
				       struct thermal_zone_device *tz,
				       u32 *power)
{
	unsigned long freq;
	int i = 0, cpu;
	u32 total_load = 0;
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
	struct cpufreq_policy *policy = cpufreq_cdev->policy;
	u32 *load_cpu = NULL;

	freq = cpufreq_quick_get(policy->cpu);

	/* Only allocate the per-cpu load array when tracing will use it */
	if (trace_thermal_power_cpu_get_power_enabled()) {
		u32 ncpus = cpumask_weight(policy->related_cpus);

		load_cpu = kcalloc(ncpus, sizeof(*load_cpu), GFP_KERNEL);
	}

	for_each_cpu(cpu, policy->related_cpus) {
		u32 load;

		if (cpu_online(cpu))
			load = get_load(cpufreq_cdev, cpu, i);
		else
			load = 0;

		total_load += load;
		if (load_cpu)
			load_cpu[i] = load;

		i++;
	}

	/* Remember the load so cpufreq_power2state() can normalise power */
	cpufreq_cdev->last_load = total_load;

	*power = get_dynamic_power(cpufreq_cdev, freq);

	if (load_cpu) {
		trace_thermal_power_cpu_get_power(policy->related_cpus, freq,
						  load_cpu, i, *power);

		kfree(load_cpu);
	}

	return 0;
}
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482static int cpufreq_state2power(struct thermal_cooling_device *cdev,
483 struct thermal_zone_device *tz,
484 unsigned long state, u32 *power)
485{
486 unsigned int freq, num_cpus;
487 struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
488
489
490 if (WARN_ON(state > cpufreq_cdev->max_level))
491 return -EINVAL;
492
493 num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);
494
495 freq = cpufreq_cdev->freq_table[state].frequency;
496 *power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;
497
498 return 0;
499}
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521static int cpufreq_power2state(struct thermal_cooling_device *cdev,
522 struct thermal_zone_device *tz, u32 power,
523 unsigned long *state)
524{
525 unsigned int target_freq;
526 u32 last_load, normalised_power;
527 struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
528 struct cpufreq_policy *policy = cpufreq_cdev->policy;
529
530 last_load = cpufreq_cdev->last_load ?: 1;
531 normalised_power = (power * 100) / last_load;
532 target_freq = cpu_power_to_freq(cpufreq_cdev, normalised_power);
533
534 *state = get_level(cpufreq_cdev, target_freq);
535 trace_thermal_power_cpu_limit(policy->related_cpus, target_freq, *state,
536 power);
537 return 0;
538}
539
540
541
/* Bind cpufreq callbacks to thermal cooling device ops (no power model) */
static struct thermal_cooling_device_ops cpufreq_cooling_ops = {
	.get_max_state = cpufreq_get_max_state,
	.get_cur_state = cpufreq_get_cur_state,
	.set_cur_state = cpufreq_set_cur_state,
};
547
/*
 * Ops used when a dynamic power coefficient is available: additionally
 * expose the power<->state conversions needed by the power allocator.
 */
static struct thermal_cooling_device_ops cpufreq_power_cooling_ops = {
	.get_max_state = cpufreq_get_max_state,
	.get_cur_state = cpufreq_get_cur_state,
	.set_cur_state = cpufreq_set_cur_state,
	.get_requested_power = cpufreq_get_requested_power,
	.state2power = cpufreq_state2power,
	.power2state = cpufreq_power2state,
};
556
557
/* Notifier for cpufreq policy change */
static struct notifier_block thermal_cpufreq_notifier_block = {
	.notifier_call = cpufreq_thermal_notifier,
};
561
562static unsigned int find_next_max(struct cpufreq_frequency_table *table,
563 unsigned int prev_max)
564{
565 struct cpufreq_frequency_table *pos;
566 unsigned int max = 0;
567
568 cpufreq_for_each_valid_entry(pos, table) {
569 if (pos->frequency > max && pos->frequency < prev_max)
570 max = pos->frequency;
571 }
572
573 return max;
574}
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
/**
 * __cpufreq_cooling_register - helper function to create cpufreq cooling device
 * @np: a valid struct device_node to the cooling device device tree node
 * @policy: cpufreq policy
 * @capacitance: dynamic power coefficient for these cpus; when non-zero
 *	the power-aware cooling ops are used.
 *
 * This interface function registers the cpufreq cooling device with the name
 * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
 * cooling devices. It also gives the opportunity to link the cooling device
 * with a device tree node, in order to bind it via the thermal DT code.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
static struct thermal_cooling_device *
__cpufreq_cooling_register(struct device_node *np,
			struct cpufreq_policy *policy, u32 capacitance)
{
	struct thermal_cooling_device *cdev;
	struct cpufreq_cooling_device *cpufreq_cdev;
	char dev_name[THERMAL_NAME_LENGTH];
	unsigned int freq, i, num_cpus;
	int ret;
	struct thermal_cooling_device_ops *cooling_ops;
	bool first;

	if (IS_ERR_OR_NULL(policy)) {
		pr_err("%s: cpufreq policy isn't valid: %p\n", __func__, policy);
		return ERR_PTR(-EINVAL);
	}

	i = cpufreq_table_count_valid_entries(policy);
	if (!i) {
		pr_debug("%s: CPUFreq table not found or has no valid entries\n",
			 __func__);
		return ERR_PTR(-ENODEV);
	}

	cpufreq_cdev = kzalloc(sizeof(*cpufreq_cdev), GFP_KERNEL);
	if (!cpufreq_cdev)
		return ERR_PTR(-ENOMEM);

	cpufreq_cdev->policy = policy;
	num_cpus = cpumask_weight(policy->related_cpus);
	cpufreq_cdev->idle_time = kcalloc(num_cpus,
					 sizeof(*cpufreq_cdev->idle_time),
					 GFP_KERNEL);
	if (!cpufreq_cdev->idle_time) {
		cdev = ERR_PTR(-ENOMEM);
		goto free_cdev;
	}

	/* max_level is an index, not a counter */
	cpufreq_cdev->max_level = i - 1;

	cpufreq_cdev->freq_table = kmalloc_array(i,
					sizeof(*cpufreq_cdev->freq_table),
					GFP_KERNEL);
	if (!cpufreq_cdev->freq_table) {
		cdev = ERR_PTR(-ENOMEM);
		goto free_idle_time;
	}

	ret = ida_simple_get(&cpufreq_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		cdev = ERR_PTR(ret);
		goto free_table;
	}
	cpufreq_cdev->id = ret;

	snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
		 cpufreq_cdev->id);

	/* Fill freq-table in descending order of frequencies */
	for (i = 0, freq = -1; i <= cpufreq_cdev->max_level; i++) {
		freq = find_next_max(policy->freq_table, freq);
		cpufreq_cdev->freq_table[i].frequency = freq;

		/* Warn for duplicate entries, but keep going */
		if (!freq)
			pr_warn("%s: table has duplicate entries\n", __func__);
		else
			pr_debug("%s: freq:%u KHz\n", __func__, freq);
	}

	if (capacitance) {
		ret = update_freq_table(cpufreq_cdev, capacitance);
		if (ret) {
			cdev = ERR_PTR(ret);
			goto remove_ida;
		}

		cooling_ops = &cpufreq_power_cooling_ops;
	} else {
		cooling_ops = &cpufreq_cooling_ops;
	}

	cdev = thermal_of_cooling_device_register(np, dev_name, cpufreq_cdev,
						  cooling_ops);
	if (IS_ERR(cdev))
		goto remove_ida;

	cpufreq_cdev->clipped_freq = cpufreq_cdev->freq_table[0].frequency;

	mutex_lock(&cooling_list_lock);
	/* Register the notifier for first cpufreq cooling device */
	first = list_empty(&cpufreq_cdev_list);
	list_add(&cpufreq_cdev->node, &cpufreq_cdev_list);
	mutex_unlock(&cooling_list_lock);

	if (first)
		cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
					  CPUFREQ_POLICY_NOTIFIER);

	return cdev;

remove_ida:
	ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
free_table:
	kfree(cpufreq_cdev->freq_table);
free_idle_time:
	kfree(cpufreq_cdev->idle_time);
free_cdev:
	kfree(cpufreq_cdev);
	return cdev;
}
703
704
705
706
707
708
709
710
711
712
713
714
/**
 * cpufreq_cooling_register - function to create cpufreq cooling device.
 * @policy: cpufreq policy
 *
 * This interface function registers the cpufreq cooling device with the
 * name "thermal-cpufreq-%x".  No device tree node and no power model
 * are used.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
struct thermal_cooling_device *
cpufreq_cooling_register(struct cpufreq_policy *policy)
{
	return __cpufreq_cooling_register(NULL, policy, 0);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741struct thermal_cooling_device *
742of_cpufreq_cooling_register(struct cpufreq_policy *policy)
743{
744 struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
745 struct thermal_cooling_device *cdev = NULL;
746 u32 capacitance = 0;
747
748 if (!np) {
749 pr_err("cpu_cooling: OF node not available for cpu%d\n",
750 policy->cpu);
751 return NULL;
752 }
753
754 if (of_find_property(np, "#cooling-cells", NULL)) {
755 of_property_read_u32(np, "dynamic-power-coefficient",
756 &capacitance);
757
758 cdev = __cpufreq_cooling_register(np, policy, capacitance);
759 if (IS_ERR(cdev)) {
760 pr_err("cpu_cooling: cpu%d failed to register as cooling device: %ld\n",
761 policy->cpu, PTR_ERR(cdev));
762 cdev = NULL;
763 }
764 }
765
766 of_node_put(np);
767 return cdev;
768}
769EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
770
771
772
773
774
775
776
777void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
778{
779 struct cpufreq_cooling_device *cpufreq_cdev;
780 bool last;
781
782 if (!cdev)
783 return;
784
785 cpufreq_cdev = cdev->devdata;
786
787 mutex_lock(&cooling_list_lock);
788 list_del(&cpufreq_cdev->node);
789
790 last = list_empty(&cpufreq_cdev_list);
791 mutex_unlock(&cooling_list_lock);
792
793 if (last)
794 cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
795 CPUFREQ_POLICY_NOTIFIER);
796
797 thermal_cooling_device_unregister(cdev);
798 ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
799 kfree(cpufreq_cdev->idle_time);
800 kfree(cpufreq_cdev->freq_table);
801 kfree(cpufreq_cdev);
802}
803EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);
804