// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/drivers/thermal/cpufreq_cooling.c
 *
 *  CPU cooling through cpufreq frequency capping.
 *
 *  Copyright (C) 2012	Samsung Electronics Co., Ltd(http://www.samsung.com)
 *
 *  Copyright (C) 2012-2018 Linaro Limited.
 *
 */
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/energy_model.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/thermal.h>

#include <trace/events/thermal.h>

/*
 * Cooling state <-> CPUFreq frequency
 *
 * Cooling states are translated to frequencies throughout this driver and this
 * is the relation between them.
 *
 * Highest cooling state corresponds to lowest possible frequency.
 *
 * i.e.
 *	level 0 --> 1st Max Freq
 *	level 1 --> 2nd Max Freq
 *	...
 */

/**
 * struct time_in_idle - Idle time stats
 * @time: previous reading of the absolute time that this cpu was idle
 * @timestamp: wall time of the last invocation of get_cpu_idle_time()
 */
struct time_in_idle {
	u64 time;
	u64 timestamp;
};

/**
 * struct cpufreq_cooling_device - data for cooling device with cpufreq
 * @id: unique integer value corresponding to each cpufreq_cooling_device
 *	registered.
 * @last_load: load measured by the latest call to
 *	cpufreq_get_requested_power()
 * @cpufreq_state: integer value representing the current state of cpufreq
 *	cooling devices.
 * @max_level: maximum cooling level. One less than total number of valid
 *	cpufreq frequencies.
 * @em: Reference on the Energy Model of the device
 * @policy: cpufreq policy.
 * @node: list_head to link all cpufreq_cooling_device together.
 * @idle_time: idle time stats (only when CONFIG_SMP is not set)
 * @qos_req: PM QoS constraint to apply
 *
 * This structure is required for keeping information of each registered
 * cpufreq_cooling_device.
 */
struct cpufreq_cooling_device {
	int id;
	u32 last_load;
	unsigned int cpufreq_state;
	unsigned int max_level;
	struct em_perf_domain *em;
	struct cpufreq_policy *policy;
	struct list_head node;
#ifndef CONFIG_SMP
	struct time_in_idle *idle_time;
#endif
	struct freq_qos_request qos_req;
};

static DEFINE_IDA(cpufreq_ida);
static DEFINE_MUTEX(cooling_list_lock);
static LIST_HEAD(cpufreq_cdev_list);

#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
/**
 * get_level: Find the level for a particular frequency
 * @cpufreq_cdev: cpufreq_cdev for which the property is required
 * @freq: Frequency
 *
 * Return: level corresponding to the frequency.
 */
static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_cdev,
			       unsigned int freq)
{
	int i;

	for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
		if (freq > cpufreq_cdev->em->table[i].frequency)
			break;
	}

	return cpufreq_cdev->max_level - i - 1;
}

static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_cdev,
			     u32 freq)
{
	int i;

	for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
		if (freq > cpufreq_cdev->em->table[i].frequency)
			break;
	}

	return cpufreq_cdev->em->table[i + 1].power;
}

static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
			     u32 power)
{
	int i;

	/* Stop at index 0 so that em->table[i] below can never underflow. */
	for (i = cpufreq_cdev->max_level; i > 0; i--) {
		if (power >= cpufreq_cdev->em->table[i].power)
			break;
	}

	return cpufreq_cdev->em->table[i].frequency;
}

/**
 * get_load() - get load for a cpu
 * @cpufreq_cdev: struct cpufreq_cooling_device for the cpu
 * @cpu: cpu number
 * @cpu_idx: index of the cpu in the time_in_idle array
 *
 * Return: The average load of cpu @cpu in percentage since this
 * function was last called.
 */
#ifdef CONFIG_SMP
static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
		    int cpu_idx)
{
	unsigned long max = arch_scale_cpu_capacity(cpu);
	unsigned long util;

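	/*
	 * On SMP, use the scheduler's utilization signal for this CPU,
	 * scaled from the [0, capacity] range to a [0, 100] percentage.
	 */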
	util = sched_cpu_util(cpu, max);
	return (util * 100) / max;
}
#else
static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
		    int cpu_idx)
{
	u32 load;
	u64 now, now_idle, delta_time, delta_idle;
	struct time_in_idle *idle_time = &cpufreq_cdev->idle_time[cpu_idx];

	now_idle = get_cpu_idle_time(cpu, &now, 0);
	delta_idle = now_idle - idle_time->time;
	delta_time = now - idle_time->timestamp;

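	/*
	 * Load is the share of wall time this CPU spent non-idle since the
	 * last sample: 100 * (delta_time - delta_idle) / delta_time.
	 */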
	if (delta_time <= delta_idle)
		load = 0;
	else
		load = div64_u64(100 * (delta_time - delta_idle), delta_time);

	idle_time->time = now_idle;
	idle_time->timestamp = now;

	return load;
}
#endif

/**
 * get_dynamic_power() - calculate the dynamic power
 * @cpufreq_cdev:	&cpufreq_cooling_device for this cdev
 * @freq:	current frequency
 *
 * Return: the dynamic power consumed by the cpus described by
 * @cpufreq_cdev.
 */
static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_cdev,
			     unsigned long freq)
{
	u32 raw_cpu_power;

	raw_cpu_power = cpu_freq_to_power(cpufreq_cdev, freq);
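	/* Scale the full-load power at this frequency by the last load (%). */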
	return (raw_cpu_power * cpufreq_cdev->last_load) / 100;
}

/**
 * cpufreq_get_requested_power() - get the current power
 * @cdev:	&thermal_cooling_device pointer
 * @power:	pointer in which to store the resulting power
 *
 * Calculate the current power consumption of the cpus in milliwatts
 * and store it in @power.  The power is estimated as the Energy
 * Model power at the current frequency scaled by the current average
 * load of the CPUs covered by the policy.
 *
 * Return: always 0.
 */
static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
				       u32 *power)
{
	unsigned long freq;
	int i = 0, cpu;
	u32 total_load = 0;
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
	struct cpufreq_policy *policy = cpufreq_cdev->policy;
	u32 *load_cpu = NULL;

	freq = cpufreq_quick_get(policy->cpu);

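	/*
	 * The per-CPU load array is only needed by the trace event, so
	 * allocate it only when tracing is enabled.
	 */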
	if (trace_thermal_power_cpu_get_power_enabled()) {
		u32 ncpus = cpumask_weight(policy->related_cpus);

		load_cpu = kcalloc(ncpus, sizeof(*load_cpu), GFP_KERNEL);
	}

	for_each_cpu(cpu, policy->related_cpus) {
		u32 load;

		if (cpu_online(cpu))
			load = get_load(cpufreq_cdev, cpu, i);
		else
			load = 0;

		total_load += load;
		if (load_cpu)
			load_cpu[i] = load;

		i++;
	}

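	/*
	 * Cache the total load; it is reused by get_dynamic_power() below
	 * and by cpufreq_power2state().
	 */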
	cpufreq_cdev->last_load = total_load;

	*power = get_dynamic_power(cpufreq_cdev, freq);

	if (load_cpu) {
		trace_thermal_power_cpu_get_power(policy->related_cpus, freq,
						  load_cpu, i, *power);

		kfree(load_cpu);
	}

	return 0;
}

/**
 * cpufreq_state2power() - convert a cpu cdev state to power consumed
 * @cdev:	&thermal_cooling_device pointer
 * @state:	cooling device state to be converted
 * @power:	pointer in which to store the resulting power
 *
 * Convert cooling device state @state into power consumption in
 * milliwatts assuming 100% load.  Store the calculated power in
 * @power.
 *
 * Return: 0 on success, -EINVAL if the cooling device state is bigger
 * than maximum allowed.
 */
static int cpufreq_state2power(struct thermal_cooling_device *cdev,
			       unsigned long state, u32 *power)
{
	unsigned int freq, num_cpus, idx;
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;

	/* Request state should be less than max_level */
	if (state > cpufreq_cdev->max_level)
		return -EINVAL;

	num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);

	idx = cpufreq_cdev->max_level - state;
	freq = cpufreq_cdev->em->table[idx].frequency;
	*power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;

	return 0;
}

/**
 * cpufreq_power2state() - convert power to a cooling device state
 * @cdev:	&thermal_cooling_device pointer
 * @power:	power in milliwatts to be converted
 * @state:	pointer in which to store the resulting state
 *
 * Calculate a cooling device state for the cpus described by @cdev
 * that would allow them to consume at most @power mW and store it in
 * @state.  Note that this calculation depends on external factors
 * such as the CPU load.  Calling this function with the same power
 * as input can yield different cooling device states depending on
 * those external factors.
 *
 * Return: 0 (this function never fails).
 */
static int cpufreq_power2state(struct thermal_cooling_device *cdev,
			       u32 power, unsigned long *state)
{
	unsigned int target_freq;
	u32 last_load, normalised_power;
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
	struct cpufreq_policy *policy = cpufreq_cdev->policy;

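	/*
	 * The power budget assumes the current load, so normalise it to a
	 * 100%-load figure before looking up the EM table; guard against a
	 * zero last_load.
	 */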
	last_load = cpufreq_cdev->last_load ?: 1;
	normalised_power = (power * 100) / last_load;
	target_freq = cpu_power_to_freq(cpufreq_cdev, normalised_power);

	*state = get_level(cpufreq_cdev, target_freq);
	trace_thermal_power_cpu_limit(policy->related_cpus, target_freq, *state,
				      power);
	return 0;
}

static inline bool em_is_sane(struct cpufreq_cooling_device *cpufreq_cdev,
			      struct em_perf_domain *em)
{
	struct cpufreq_policy *policy;
	unsigned int nr_levels;

	if (!em)
		return false;

	policy = cpufreq_cdev->policy;
	if (!cpumask_equal(policy->related_cpus, em_span_cpus(em))) {
		pr_err("The span of pd %*pbl is misaligned with cpufreq policy %*pbl\n",
		       cpumask_pr_args(em_span_cpus(em)),
		       cpumask_pr_args(policy->related_cpus));
		return false;
	}

	nr_levels = cpufreq_cdev->max_level + 1;
	if (em_pd_nr_perf_states(em) != nr_levels) {
		pr_err("The number of performance states in pd %*pbl (%u) doesn't match the number of cooling levels (%u)\n",
		       cpumask_pr_args(em_span_cpus(em)),
		       em_pd_nr_perf_states(em), nr_levels);
		return false;
	}

	return true;
}
#endif /* CONFIG_THERMAL_GOV_POWER_ALLOCATOR */

#ifdef CONFIG_SMP
static inline int allocate_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
{
	return 0;
}

static inline void free_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
{
}
#else
static int allocate_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
{
	unsigned int num_cpus = cpumask_weight(cpufreq_cdev->policy->related_cpus);

	cpufreq_cdev->idle_time = kcalloc(num_cpus,
					  sizeof(*cpufreq_cdev->idle_time),
					  GFP_KERNEL);
	if (!cpufreq_cdev->idle_time)
		return -ENOMEM;

	return 0;
}

static void free_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
{
	kfree(cpufreq_cdev->idle_time);
	cpufreq_cdev->idle_time = NULL;
}
#endif /* CONFIG_SMP */

static unsigned int get_state_freq(struct cpufreq_cooling_device *cpufreq_cdev,
				   unsigned long state)
{
	struct cpufreq_policy *policy;
	unsigned long idx;

#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
	/* Use the Energy Model table if available */
	if (cpufreq_cdev->em) {
		idx = cpufreq_cdev->max_level - state;
		return cpufreq_cdev->em->table[idx].frequency;
	}
#endif

	/* Otherwise, fall back on the cpufreq table */
	policy = cpufreq_cdev->policy;
	if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
		idx = cpufreq_cdev->max_level - state;
	else
		idx = state;

	return policy->freq_table[idx].frequency;
}

/**
 * cpufreq_get_max_state - callback function to get the max cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: fill this variable with the max cooling state.
 *
 * Callback for the thermal cooling device to return the cpufreq
 * max cooling state.
 *
 * Return: 0 on success, an error code otherwise.
 */
static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;

	*state = cpufreq_cdev->max_level;
	return 0;
}

/**
 * cpufreq_get_cur_state - callback function to get the current cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: fill this variable with the current cooling state.
 *
 * Callback for the thermal cooling device to return the cpufreq
 * current cooling state.
 *
 * Return: 0 on success, an error code otherwise.
 */
static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;

	*state = cpufreq_cdev->cpufreq_state;

	return 0;
}

/**
 * cpufreq_set_cur_state - callback function to set the current cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: set this variable to the current cooling state.
 *
 * Callback for the thermal cooling device to change the cpufreq
 * current cooling state.
 *
 * Return: 0 on success, an error code otherwise.
 */
static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long state)
{
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
	struct cpumask *cpus;
	unsigned int frequency;
	unsigned long max_capacity, capacity;
	int ret;

	/* Request state should be less than max_level */
	if (state > cpufreq_cdev->max_level)
		return -EINVAL;

	/* Check if the old cooling action is same as new cooling action */
	if (cpufreq_cdev->cpufreq_state == state)
		return 0;

	frequency = get_state_freq(cpufreq_cdev, state);

	ret = freq_qos_update_request(&cpufreq_cdev->qos_req, frequency);
	if (ret >= 0) {
		cpufreq_cdev->cpufreq_state = state;
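
		/*
		 * Tell the scheduler about the capacity lost to thermal
		 * capping: capacity scales linearly with the capped
		 * frequency, and the shortfall from the maximum capacity
		 * is reported as thermal pressure.
		 */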
		cpus = cpufreq_cdev->policy->cpus;
		max_capacity = arch_scale_cpu_capacity(cpumask_first(cpus));
		capacity = frequency * max_capacity;
		capacity /= cpufreq_cdev->policy->cpuinfo.max_freq;
		arch_set_thermal_pressure(cpus, max_capacity - capacity);
		ret = 0;
	}

	return ret;
}

/* Bind cpufreq callbacks to thermal cooling device ops */
static struct thermal_cooling_device_ops cpufreq_cooling_ops = {
	.get_max_state = cpufreq_get_max_state,
	.get_cur_state = cpufreq_get_cur_state,
	.set_cur_state = cpufreq_set_cur_state,
};

/**
 * __cpufreq_cooling_register - helper function to create cpufreq cooling device
 * @np: a valid struct device_node to the cooling device device tree node
 * @policy: cpufreq policy
 * @em: Energy Model of the device (may be NULL)
 *
 * This interface function registers the cpufreq cooling device with the name
 * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
 * cooling devices. It also gives the opportunity to link the cooling device
 * with a device tree node, in order to bind it via the thermal DT code.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
static struct thermal_cooling_device *
__cpufreq_cooling_register(struct device_node *np,
			   struct cpufreq_policy *policy,
			   struct em_perf_domain *em)
{
	struct thermal_cooling_device *cdev;
	struct cpufreq_cooling_device *cpufreq_cdev;
	char dev_name[THERMAL_NAME_LENGTH];
	unsigned int i;
	struct device *dev;
	int ret;
	struct thermal_cooling_device_ops *cooling_ops;

	/* Validate the policy before dereferencing policy->cpu. */
	if (IS_ERR_OR_NULL(policy)) {
		pr_err("%s: cpufreq policy isn't valid: %p\n", __func__, policy);
		return ERR_PTR(-EINVAL);
	}

	dev = get_cpu_device(policy->cpu);
	if (unlikely(!dev)) {
		pr_warn("No cpu device for cpu %d\n", policy->cpu);
		return ERR_PTR(-ENODEV);
	}

	i = cpufreq_table_count_valid_entries(policy);
	if (!i) {
		pr_debug("%s: CPUFreq table not found or has no valid entries\n",
			 __func__);
		return ERR_PTR(-ENODEV);
	}

	cpufreq_cdev = kzalloc(sizeof(*cpufreq_cdev), GFP_KERNEL);
	if (!cpufreq_cdev)
		return ERR_PTR(-ENOMEM);

	cpufreq_cdev->policy = policy;

	ret = allocate_idle_time(cpufreq_cdev);
	if (ret) {
		cdev = ERR_PTR(ret);
		goto free_cdev;
	}

	/* max_level is an index, not a counter */
	cpufreq_cdev->max_level = i - 1;

	ret = ida_simple_get(&cpufreq_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		cdev = ERR_PTR(ret);
		goto free_idle_time;
	}
	cpufreq_cdev->id = ret;

	snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
		 cpufreq_cdev->id);

	cooling_ops = &cpufreq_cooling_ops;

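	/*
	 * Advertise the power-allocator callbacks only when a sane Energy
	 * Model is available; otherwise plain state capping is used.
	 */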
#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
	if (em_is_sane(cpufreq_cdev, em)) {
		cpufreq_cdev->em = em;
		cooling_ops->get_requested_power = cpufreq_get_requested_power;
		cooling_ops->state2power = cpufreq_state2power;
		cooling_ops->power2state = cpufreq_power2state;
	} else
#endif
	if (policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED) {
		pr_err("%s: unsorted frequency tables are not supported\n",
		       __func__);
		cdev = ERR_PTR(-EINVAL);
		goto remove_ida;
	}

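	/*
	 * Cooling is enforced through a maximum-frequency QoS request,
	 * started at the state-0 (unthrottled) frequency.
	 */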
	ret = freq_qos_add_request(&policy->constraints,
				   &cpufreq_cdev->qos_req, FREQ_QOS_MAX,
				   get_state_freq(cpufreq_cdev, 0));
	if (ret < 0) {
		pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
		       ret);
		cdev = ERR_PTR(ret);
		goto remove_ida;
	}

	cdev = thermal_of_cooling_device_register(np, dev_name, cpufreq_cdev,
						  cooling_ops);
	if (IS_ERR(cdev))
		goto remove_qos_req;

	mutex_lock(&cooling_list_lock);
	list_add(&cpufreq_cdev->node, &cpufreq_cdev_list);
	mutex_unlock(&cooling_list_lock);

	return cdev;

remove_qos_req:
	freq_qos_remove_request(&cpufreq_cdev->qos_req);
remove_ida:
	ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
free_idle_time:
	free_idle_time(cpufreq_cdev);
free_cdev:
	kfree(cpufreq_cdev);
	return cdev;
}

/**
 * cpufreq_cooling_register - function to create cpufreq cooling device.
 * @policy: cpufreq policy
 *
 * This interface function registers the cpufreq cooling device with the name
 * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
 * cooling devices.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
struct thermal_cooling_device *
cpufreq_cooling_register(struct cpufreq_policy *policy)
{
	return __cpufreq_cooling_register(NULL, policy, NULL);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_register);

/**
 * of_cpufreq_cooling_register - function to create cpufreq cooling device.
 * @policy: cpufreq policy
 *
 * This interface function registers the cpufreq cooling device with the name
 * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
 * cooling devices. Using this API, the cpufreq cooling device will be
 * linked to the device tree node provided.
 *
 * Using this function, the cooling device will implement the power
 * extensions by using the Energy Model (if present).
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * and NULL on failure.
 */
struct thermal_cooling_device *
of_cpufreq_cooling_register(struct cpufreq_policy *policy)
{
	struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
	struct thermal_cooling_device *cdev = NULL;

	if (!np) {
		pr_err("cpufreq_cooling: OF node not available for cpu%d\n",
		       policy->cpu);
		return NULL;
	}

	if (of_find_property(np, "#cooling-cells", NULL)) {
		struct em_perf_domain *em = em_cpu_get(policy->cpu);

		cdev = __cpufreq_cooling_register(np, policy, em);
		if (IS_ERR(cdev)) {
			pr_err("cpufreq_cooling: cpu%d failed to register as cooling device: %ld\n",
			       policy->cpu, PTR_ERR(cdev));
			cdev = NULL;
		}
	}

	of_node_put(np);
	return cdev;
}
EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);

/**
 * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
 * @cdev: thermal cooling device pointer.
 *
 * This interface function unregisters the "thermal-cpufreq-%x" cooling device.
 */
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
	struct cpufreq_cooling_device *cpufreq_cdev;

	if (!cdev)
		return;

	cpufreq_cdev = cdev->devdata;

	mutex_lock(&cooling_list_lock);
	list_del(&cpufreq_cdev->node);
	mutex_unlock(&cooling_list_lock);

	thermal_cooling_device_unregister(cdev);
	freq_qos_remove_request(&cpufreq_cdev->qos_req);
	ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
	free_idle_time(cpufreq_cdev);
	kfree(cpufreq_cdev);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);