/*
 * cpu_cooling.c - CPU frequency based cooling support
 *
 * Exposes a set of CPUs as a thermal cooling device: each cooling state
 * corresponds to one entry of the cpufreq frequency table, and raising the
 * state clips the maximum allowed frequency of those CPUs.
 */

#include <linux/module.h>
#include <linux/thermal.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>

/**
 * struct cpufreq_cooling_device - data for a cpufreq based cooling device
 * @id: unique id obtained from the cpufreq_idr allocator
 * @cool_dev: the registered thermal_cooling_device
 * @cpufreq_state: current cooling state (0 means no throttling)
 * @cpufreq_val: frequency (in kHz) the CPUs are clipped to at @cpufreq_state
 * @allowed_cpus: mask of all CPUs affected by this cooling device
 */
struct cpufreq_cooling_device {
	int id;
	struct thermal_cooling_device *cool_dev;
	unsigned int cpufreq_state;
	unsigned int cpufreq_val;
	struct cpumask allowed_cpus;
};
static DEFINE_IDR(cpufreq_idr);
static DEFINE_MUTEX(cooling_cpufreq_lock);

/* Number of cpufreq cooling devices currently registered. */
static unsigned int cpufreq_dev_count;

/*
 * notify_device passes the cooling device being throttled to the
 * CPUFREQ_ADJUST notifier callback while a cooling state change is applied.
 */
#define NOTIFY_INVALID NULL
static struct cpufreq_cooling_device *notify_device;

/**
 * get_idr - allocate a unique id under cooling_cpufreq_lock
 * @idr: idr handle used for allocation
 * @id: where to store the allocated id
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int get_idr(struct idr *idr, int *id)
{
	int ret;

	mutex_lock(&cooling_cpufreq_lock);
	ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);
	mutex_unlock(&cooling_cpufreq_lock);
	if (unlikely(ret < 0))
		return ret;
	*id = ret;

	return 0;
}

/**
 * release_idr - free an id previously obtained from get_idr()
 * @idr: idr handle the id was allocated from
 * @id: id to free
 */
static void release_idr(struct idr *idr, int id)
{
	mutex_lock(&cooling_cpufreq_lock);
	idr_remove(idr, id);
	mutex_unlock(&cooling_cpufreq_lock);
}

/**
 * is_cpufreq_valid - check whether a CPU has a cpufreq policy
 * @cpu: CPU to check
 *
 * Return: non-zero if a cpufreq policy exists for @cpu, 0 otherwise.
 */
static int is_cpufreq_valid(int cpu)
{
	struct cpufreq_policy policy;

	return !cpufreq_get_policy(&policy, cpu);
}

/* Selects which property get_property() computes. */
enum cpufreq_cooling_property {
	GET_LEVEL,
	GET_FREQ,
	GET_MAXL,
};

/**
 * get_property - fetch a property of interest for a given CPU
 * @cpu: CPU whose cpufreq frequency table is used
 * @input: query parameter: a frequency for GET_LEVEL, a level for GET_FREQ,
 *	ignored for GET_MAXL
 * @output: where the result is stored
 * @property: type of query (GET_LEVEL, GET_FREQ or GET_MAXL)
 *
 * Level 0 always corresponds to the highest frequency, regardless of
 * whether the frequency table is sorted ascending or descending.
 *
 * Return: 0 on success, -EINVAL if the table is missing or the query
 * cannot be satisfied.
 */
static int get_property(unsigned int cpu, unsigned long input,
			unsigned int *output,
			enum cpufreq_cooling_property property)
{
	int i, j;
	unsigned long max_level = 0, level = 0;
	unsigned int freq = CPUFREQ_ENTRY_INVALID;
	int descend = -1;
	struct cpufreq_frequency_table *table =
		cpufreq_frequency_get_table(cpu);

	if (!output)
		return -EINVAL;

	if (!table)
		return -EINVAL;

	/* Count the distinct valid entries and detect the table order. */
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		/* ignore invalid entries */
		if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
			continue;

		/* ignore duplicate entries */
		if (freq == table[i].frequency)
			continue;

		/* get the frequency order, only on the first transition */
		if (freq != CPUFREQ_ENTRY_INVALID && descend == -1)
			descend = !!(freq > table[i].frequency);

		freq = table[i].frequency;
		max_level++;
	}

	/* get max level */
	if (property == GET_MAXL) {
		*output = (unsigned int)max_level;
		return 0;
	}

	if (property == GET_FREQ)
		level = descend ? input : (max_level - input - 1);

	for (i = 0, j = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		/* ignore invalid entry */
		if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
			continue;

		/* ignore duplicate entry */
		if (freq == table[i].frequency)
			continue;

		/* now we have a valid frequency entry */
		freq = table[i].frequency;

		if (property == GET_LEVEL && (unsigned int)input == freq) {
			/* get level by frequency */
			*output = descend ? j : (max_level - j - 1);
			return 0;
		}
		if (property == GET_FREQ && level == j) {
			/* get frequency by level */
			*output = freq;
			return 0;
		}
		j++;
	}

	return -EINVAL;
}

/**
 * cpufreq_cooling_get_level - return the cooling level of a given frequency
 * @cpu: CPU for which the level is requested
 * @freq: frequency (in kHz) to look up
 *
 * Return: the matching cooling level, or THERMAL_CSTATE_INVALID if @freq
 * is not a valid entry of the CPU's frequency table.
 */
unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
{
	unsigned int val;

	if (get_property(cpu, (unsigned long)freq, &val, GET_LEVEL))
		return THERMAL_CSTATE_INVALID;

	return (unsigned long)val;
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level);

/**
 * get_cpu_frequency - translate a cooling level into a frequency
 * @cpu: CPU for which the frequency is requested
 * @level: cooling level to translate
 *
 * Return: the corresponding frequency (in kHz), or 0 on error.
 */
static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level)
{
	int ret = 0;
	unsigned int freq;

	ret = get_property(cpu, level, &freq, GET_FREQ);
	if (ret)
		return 0;

	return freq;
}

/**
 * cpufreq_apply_cooling - apply a new cooling state to a set of CPUs
 * @cpufreq_device: cooling device whose CPUs are to be throttled
 * @cooling_state: requested cooling state (frequency table level)
 *
 * Looks up the clip frequency for @cooling_state and triggers a policy
 * update on every affected CPU so that the CPUFREQ_ADJUST notifier can
 * apply the new maximum.
 *
 * Return: 0 on success, -EINVAL if the state maps to no valid frequency.
 */
static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
				 unsigned long cooling_state)
{
	unsigned int cpuid, clip_freq;
	struct cpumask *mask = &cpufreq_device->allowed_cpus;
	unsigned int cpu = cpumask_any(mask);

	/* Check if the old cooling action is same as new cooling action */
	if (cpufreq_device->cpufreq_state == cooling_state)
		return 0;

	clip_freq = get_cpu_frequency(cpu, cooling_state);
	if (!clip_freq)
		return -EINVAL;

	cpufreq_device->cpufreq_state = cooling_state;
	cpufreq_device->cpufreq_val = clip_freq;
	notify_device = cpufreq_device;

	for_each_cpu(cpuid, mask) {
		if (is_cpufreq_valid(cpuid))
			cpufreq_update_policy(cpuid);
	}

	notify_device = NOTIFY_INVALID;

	return 0;
}

/**
 * cpufreq_thermal_notifier - notifier callback for cpufreq policy changes
 * @nb: notifier_block this callback is registered with
 * @event: cpufreq notifier event
 * @data: the struct cpufreq_policy being adjusted
 *
 * On CPUFREQ_ADJUST, clamps the policy maximum of the CPUs belonging to
 * the cooling device currently pointed to by notify_device.
 *
 * Return: always 0 (the notification is never vetoed).
 */
static int cpufreq_thermal_notifier(struct notifier_block *nb,
				    unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned long max_freq = 0;

	if (event != CPUFREQ_ADJUST || notify_device == NOTIFY_INVALID)
		return 0;

	if (cpumask_test_cpu(policy->cpu, &notify_device->allowed_cpus))
		max_freq = notify_device->cpufreq_val;

	/* Never exceed user_policy.max */
	if (max_freq > policy->user_policy.max)
		max_freq = policy->user_policy.max;

	if (policy->max != max_freq)
		cpufreq_verify_within_limits(policy, 0, max_freq);

	return 0;
}

/* cpufreq cooling device callback functions */

/**
 * cpufreq_get_max_state - callback to get the maximum cooling state
 * @cdev: thermal cooling device
 * @state: where the maximum cooling state is stored
 *
 * The maximum state reported is the number of distinct valid entries in
 * the CPU's frequency table.
 *
 * Return: 0 on success, an error code from get_property() otherwise.
 */
static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
	struct cpumask *mask = &cpufreq_device->allowed_cpus;
	unsigned int cpu;
	unsigned int count = 0;
	int ret;

	cpu = cpumask_any(mask);

	ret = get_property(cpu, 0, &count, GET_MAXL);

	if (count > 0)
		*state = count;

	return ret;
}

/**
 * cpufreq_get_cur_state - callback to get the current cooling state
 * @cdev: thermal cooling device
 * @state: where the current cooling state is stored
 *
 * Return: always 0.
 */
static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;

	*state = cpufreq_device->cpufreq_state;

	return 0;
}

/**
 * cpufreq_set_cur_state - callback to set the current cooling state
 * @cdev: thermal cooling device
 * @state: requested cooling state
 *
 * Return: 0 on success, an error code from cpufreq_apply_cooling()
 * otherwise.
 */
static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long state)
{
	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;

	return cpufreq_apply_cooling(cpufreq_device, state);
}

/* Bind cpufreq callbacks to thermal cooling device ops */
static struct thermal_cooling_device_ops const cpufreq_cooling_ops = {
	.get_max_state = cpufreq_get_max_state,
	.get_cur_state = cpufreq_get_cur_state,
	.set_cur_state = cpufreq_set_cur_state,
};

/* Notifier for cpufreq policy change */
static struct notifier_block thermal_cpufreq_notifier_block = {
	.notifier_call = cpufreq_thermal_notifier,
};

/**
 * cpufreq_cooling_register - register a cpufreq based cooling device
 * @clip_cpus: cpumask of the CPUs whose frequency will be clipped
 *
 * All CPUs in @clip_cpus must share the same cpuinfo min/max limits.
 * The cpufreq policy notifier is installed when the first cooling device
 * is registered.
 *
 * Return: a valid thermal_cooling_device pointer on success, an ERR_PTR()
 * on failure.
 */
struct thermal_cooling_device *
cpufreq_cooling_register(const struct cpumask *clip_cpus)
{
	struct thermal_cooling_device *cool_dev;
	struct cpufreq_cooling_device *cpufreq_dev = NULL;
	unsigned int min = 0, max = 0;
	char dev_name[THERMAL_NAME_LENGTH];
	int ret = 0, i;
	struct cpufreq_policy policy;

	/* Verify that all the clip cpus have same freq_min, freq_max limit */
	for_each_cpu(i, clip_cpus) {
		/* skip CPUs that have no cpufreq policy, without failing */
		if (cpufreq_get_policy(&policy, i))
			continue;
		if (min == 0 && max == 0) {
			min = policy.cpuinfo.min_freq;
			max = policy.cpuinfo.max_freq;
		} else {
			if (min != policy.cpuinfo.min_freq ||
			    max != policy.cpuinfo.max_freq)
				return ERR_PTR(-EINVAL);
		}
	}
	cpufreq_dev = kzalloc(sizeof(struct cpufreq_cooling_device),
			      GFP_KERNEL);
	if (!cpufreq_dev)
		return ERR_PTR(-ENOMEM);

	cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);

	ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
	if (ret) {
		kfree(cpufreq_dev);
		return ERR_PTR(-EINVAL);
	}

	snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
		 cpufreq_dev->id);

	cool_dev = thermal_cooling_device_register(dev_name, cpufreq_dev,
						   &cpufreq_cooling_ops);
	if (IS_ERR(cool_dev)) {
		release_idr(&cpufreq_idr, cpufreq_dev->id);
		kfree(cpufreq_dev);
		return ERR_PTR(-EINVAL);
	}
	cpufreq_dev->cool_dev = cool_dev;
	cpufreq_dev->cpufreq_state = 0;
	mutex_lock(&cooling_cpufreq_lock);

	/* Register the notifier for the first cpufreq cooling device */
	if (cpufreq_dev_count == 0)
		cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
					  CPUFREQ_POLICY_NOTIFIER);
	cpufreq_dev_count++;

	mutex_unlock(&cooling_cpufreq_lock);

	return cool_dev;
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_register);

/**
 * cpufreq_cooling_unregister - unregister a cpufreq based cooling device
 * @cdev: cooling device returned by cpufreq_cooling_register()
 *
 * The cpufreq policy notifier is removed when the last cooling device
 * goes away.
 */
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
	struct cpufreq_cooling_device *cpufreq_dev = cdev->devdata;

	mutex_lock(&cooling_cpufreq_lock);
	cpufreq_dev_count--;

	/* Unregister the notifier for the last cpufreq cooling device */
	if (cpufreq_dev_count == 0)
		cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
					    CPUFREQ_POLICY_NOTIFIER);
	mutex_unlock(&cooling_cpufreq_lock);

	thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
	release_idr(&cpufreq_idr, cpufreq_dev->id);
	kfree(cpufreq_dev);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);
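
/*
 * Usage sketch: a platform thermal driver typically registers one cpufreq
 * cooling device for the CPUs it wants to throttle and unregisters it on
 * teardown; cpu_present_mask below is only an example clip mask, and error
 * handling would live in the caller's probe path.
 *
 *	struct thermal_cooling_device *cdev;
 *
 *	cdev = cpufreq_cooling_register(cpu_present_mask);
 *	if (IS_ERR(cdev))
 *		return PTR_ERR(cdev);
 *	...
 *	cpufreq_cooling_unregister(cdev);
 */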