1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <linux/module.h>
24#include <linux/thermal.h>
25#include <linux/cpufreq.h>
26#include <linux/err.h>
27#include <linux/slab.h>
28#include <linux/cpu.h>
29#include <linux/cpu_cooling.h>
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
/**
 * struct cpufreq_cooling_device - data for cooling device with cpufreq
 * @id: unique integer value corresponding to each cpufreq_cooling_device
 *	registered.
 * @cool_dev: thermal_cooling_device pointer to keep track of the
 *	registered cooling device.
 * @cpufreq_state: integer value representing the current state of cpufreq
 *	cooling devices.
 * @cpufreq_val: integer value representing the absolute value of the clipped
 *	frequency.
 * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
 *
 * This structure is required for keeping information of each
 * cpufreq_cooling_device registered. In order to prevent corruption of this a
 * mutex lock cooling_cpufreq_lock is used.
 */
struct cpufreq_cooling_device {
	int id;
	struct thermal_cooling_device *cool_dev;
	unsigned int cpufreq_state;
	unsigned int cpufreq_val;
	struct cpumask allowed_cpus;
};
/* Hands out the unique ids used in the cooling-device names. */
static DEFINE_IDR(cpufreq_idr);
/* Protects cpufreq_idr, cpufreq_dev_count and notifier (un)registration. */
static DEFINE_MUTEX(cooling_cpufreq_lock);

/* Number of cpufreq cooling devices currently registered. */
static unsigned int cpufreq_dev_count;

/* notify_device value meaning "no frequency clipping in progress". */
#define NOTIFY_INVALID NULL
/*
 * Device whose clip frequency is currently being applied; read by
 * cpufreq_thermal_notifier() during cpufreq_update_policy() calls made
 * from cpufreq_apply_cooling().
 */
static struct cpufreq_cooling_device *notify_device;
62
63
64
65
66
67
68
69
70
71
72
73static int get_idr(struct idr *idr, int *id)
74{
75 int ret;
76
77 mutex_lock(&cooling_cpufreq_lock);
78 ret = idr_alloc(idr, NULL, 0, 0, GFP_KERNEL);
79 mutex_unlock(&cooling_cpufreq_lock);
80 if (unlikely(ret < 0))
81 return ret;
82 *id = ret;
83
84 return 0;
85}
86
87
88
89
90
91
92static void release_idr(struct idr *idr, int id)
93{
94 mutex_lock(&cooling_cpufreq_lock);
95 idr_remove(idr, id);
96 mutex_unlock(&cooling_cpufreq_lock);
97}
98
99
100
101
102
103
104
105
106
107
108
109
110
111static int is_cpufreq_valid(int cpu)
112{
113 struct cpufreq_policy policy;
114
115 return !cpufreq_get_policy(&policy, cpu);
116}
117
/* Query selector for get_property(). */
enum cpufreq_cooling_property {
	GET_LEVEL,	/* translate a frequency (input) to a cooling level */
	GET_FREQ,	/* translate a cooling level (input) to a frequency */
	GET_MAXL,	/* report the number of distinct valid frequencies */
};
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143static int get_property(unsigned int cpu, unsigned long input,
144 unsigned int *output,
145 enum cpufreq_cooling_property property)
146{
147 int i, j;
148 unsigned long max_level = 0, level = 0;
149 unsigned int freq = CPUFREQ_ENTRY_INVALID;
150 int descend = -1;
151 struct cpufreq_frequency_table *table =
152 cpufreq_frequency_get_table(cpu);
153
154 if (!output)
155 return -EINVAL;
156
157 if (!table)
158 return -EINVAL;
159
160 for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
161
162 if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
163 continue;
164
165
166 if (freq == table[i].frequency)
167 continue;
168
169
170 if (freq != CPUFREQ_ENTRY_INVALID && descend == -1)
171 descend = !!(freq > table[i].frequency);
172
173 freq = table[i].frequency;
174 max_level++;
175 }
176
177
178 if (property == GET_MAXL) {
179 *output = (unsigned int)max_level;
180 return 0;
181 }
182
183 if (property == GET_FREQ)
184 level = descend ? input : (max_level - input - 1);
185
186 for (i = 0, j = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
187
188 if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
189 continue;
190
191
192 if (freq == table[i].frequency)
193 continue;
194
195
196 freq = table[i].frequency;
197
198 if (property == GET_LEVEL && (unsigned int)input == freq) {
199
200 *output = descend ? j : (max_level - j - 1);
201 return 0;
202 }
203 if (property == GET_FREQ && level == j) {
204
205 *output = freq;
206 return 0;
207 }
208 j++;
209 }
210
211 return -EINVAL;
212}
213
214
215
216
217
218
219
220
221
222
223
224
225unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
226{
227 unsigned int val;
228
229 if (get_property(cpu, (unsigned long)freq, &val, GET_LEVEL))
230 return THERMAL_CSTATE_INVALID;
231
232 return (unsigned long)val;
233}
234EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level);
235
236
237
238
239
240
241
242
243
244
245
246
247
248static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level)
249{
250 int ret = 0;
251 unsigned int freq;
252
253 ret = get_property(cpu, level, &freq, GET_FREQ);
254 if (ret)
255 return 0;
256
257 return freq;
258}
259
260
261
262
263
264
265
266
267
268
269
270
271
/**
 * cpufreq_apply_cooling - apply a frequency clip for a cooling state.
 * @cpufreq_device: cpufreq_cooling_device for which the clip is applied
 * @cooling_state: requested cooling state (0 = no clipping)
 *
 * Translates @cooling_state into a clip frequency, records it in the
 * device, then triggers a policy update on every affected cpu; the actual
 * clamping happens in cpufreq_thermal_notifier(), which reads the clip
 * frequency through the file-scope 'notify_device' pointer while the
 * updates run.  The set/clear of notify_device around the loop is the
 * hand-off protocol with the notifier.
 *
 * NOTE(review): notify_device is written here without holding
 * cooling_cpufreq_lock, so concurrent calls for different devices could
 * race — presumably callers are serialized by the thermal core; verify.
 *
 * Return: 0 on success (including the no-op case where the state is
 * unchanged), -EINVAL when @cooling_state has no matching frequency.
 */
static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
				 unsigned long cooling_state)
{
	unsigned int cpuid, clip_freq;
	struct cpumask *mask = &cpufreq_device->allowed_cpus;
	unsigned int cpu = cpumask_any(mask);

	/* Check if the old cooling action is same as new cooling action */
	if (cpufreq_device->cpufreq_state == cooling_state)
		return 0;

	clip_freq = get_cpu_frequency(cpu, cooling_state);
	if (!clip_freq)
		return -EINVAL;

	cpufreq_device->cpufreq_state = cooling_state;
	cpufreq_device->cpufreq_val = clip_freq;
	notify_device = cpufreq_device;

	for_each_cpu(cpuid, mask) {
		if (is_cpufreq_valid(cpuid))
			cpufreq_update_policy(cpuid);
	}

	notify_device = NOTIFY_INVALID;

	return 0;
}
301
302
303
304
305
306
307
308
309
310
311
312
313
314static int cpufreq_thermal_notifier(struct notifier_block *nb,
315 unsigned long event, void *data)
316{
317 struct cpufreq_policy *policy = data;
318 unsigned long max_freq = 0;
319
320 if (event != CPUFREQ_ADJUST || notify_device == NOTIFY_INVALID)
321 return 0;
322
323 if (cpumask_test_cpu(policy->cpu, ¬ify_device->allowed_cpus))
324 max_freq = notify_device->cpufreq_val;
325 else
326 return 0;
327
328
329 if (max_freq > policy->user_policy.max)
330 max_freq = policy->user_policy.max;
331
332 if (policy->max != max_freq)
333 cpufreq_verify_within_limits(policy, 0, max_freq);
334
335 return 0;
336}
337
338
339
340
341
342
343
344
345
346
347
348
349
350static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
351 unsigned long *state)
352{
353 struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
354 struct cpumask *mask = &cpufreq_device->allowed_cpus;
355 unsigned int cpu;
356 unsigned int count = 0;
357 int ret;
358
359 cpu = cpumask_any(mask);
360
361 ret = get_property(cpu, 0, &count, GET_MAXL);
362
363 if (count > 0)
364 *state = count;
365
366 return ret;
367}
368
369
370
371
372
373
374
375
376
377
378
379static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
380 unsigned long *state)
381{
382 struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
383
384 *state = cpufreq_device->cpufreq_state;
385
386 return 0;
387}
388
389
390
391
392
393
394
395
396
397
398
399static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
400 unsigned long state)
401{
402 struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
403
404 return cpufreq_apply_cooling(cpufreq_device, state);
405}
406
407
/* Bind cpufreq callbacks to thermal cooling device ops */
static struct thermal_cooling_device_ops const cpufreq_cooling_ops = {
	.get_max_state = cpufreq_get_max_state,
	.get_cur_state = cpufreq_get_cur_state,
	.set_cur_state = cpufreq_set_cur_state,
};

/* Notifier for cpufreq policy change; registered once, on first device */
static struct notifier_block thermal_cpufreq_notifier_block = {
	.notifier_call = cpufreq_thermal_notifier,
};
418
419
420
421
422
423
424
425
426
427
428
429
430struct thermal_cooling_device *
431cpufreq_cooling_register(const struct cpumask *clip_cpus)
432{
433 struct thermal_cooling_device *cool_dev;
434 struct cpufreq_cooling_device *cpufreq_dev = NULL;
435 unsigned int min = 0, max = 0;
436 char dev_name[THERMAL_NAME_LENGTH];
437 int ret = 0, i;
438 struct cpufreq_policy policy;
439
440
441 for_each_cpu(i, clip_cpus) {
442
443 if (!cpufreq_get_policy(&policy, i))
444 continue;
445 if (min == 0 && max == 0) {
446 min = policy.cpuinfo.min_freq;
447 max = policy.cpuinfo.max_freq;
448 } else {
449 if (min != policy.cpuinfo.min_freq ||
450 max != policy.cpuinfo.max_freq)
451 return ERR_PTR(-EINVAL);
452 }
453 }
454 cpufreq_dev = kzalloc(sizeof(struct cpufreq_cooling_device),
455 GFP_KERNEL);
456 if (!cpufreq_dev)
457 return ERR_PTR(-ENOMEM);
458
459 cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);
460
461 ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
462 if (ret) {
463 kfree(cpufreq_dev);
464 return ERR_PTR(-EINVAL);
465 }
466
467 snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
468 cpufreq_dev->id);
469
470 cool_dev = thermal_cooling_device_register(dev_name, cpufreq_dev,
471 &cpufreq_cooling_ops);
472 if (!cool_dev) {
473 release_idr(&cpufreq_idr, cpufreq_dev->id);
474 kfree(cpufreq_dev);
475 return ERR_PTR(-EINVAL);
476 }
477 cpufreq_dev->cool_dev = cool_dev;
478 cpufreq_dev->cpufreq_state = 0;
479 mutex_lock(&cooling_cpufreq_lock);
480
481
482 if (cpufreq_dev_count == 0)
483 cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
484 CPUFREQ_POLICY_NOTIFIER);
485 cpufreq_dev_count++;
486
487 mutex_unlock(&cooling_cpufreq_lock);
488
489 return cool_dev;
490}
491EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
492
493
494
495
496
497
498
499void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
500{
501 struct cpufreq_cooling_device *cpufreq_dev;
502
503 if (!cdev)
504 return;
505
506 cpufreq_dev = cdev->devdata;
507 mutex_lock(&cooling_cpufreq_lock);
508 cpufreq_dev_count--;
509
510
511 if (cpufreq_dev_count == 0)
512 cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
513 CPUFREQ_POLICY_NOTIFIER);
514 mutex_unlock(&cooling_cpufreq_lock);
515
516 thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
517 release_idr(&cpufreq_idr, cpufreq_dev->id);
518 kfree(cpufreq_dev);
519}
520EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);
521