// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) driver for
 * interfacing with the CPUfreq layer and governors. See
 * cppc_acpi.c for CPPC specific methods.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 */

#define pr_fmt(fmt) "CPPC Cpufreq:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/time.h>
#include <linux/vmalloc.h>

#include <asm/unaligned.h>

#include <acpi/cppc_acpi.h>

/* Minimum struct length needed for the DMI processor entry */
#define DMI_ENTRY_PROCESSOR_MIN_LENGTH 48

/* Offset in the DMI processor structure for the max frequency */
#define DMI_PROCESSOR_MAX_SPEED 0x14

/*
 * These structs contain information parsed from per CPU
 * ACPI _CPC structures.
 * e.g. For each CPU the highest, lowest supported
 * performance capabilities, desired performance level
 * requested etc.
 */
static struct cppc_cpudata **all_cpu_data;

struct cppc_workaround_oem_info {
        char oem_id[ACPI_OEM_ID_SIZE + 1];
        char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
        u32 oem_revision;
};

static bool apply_hisi_workaround;

/* OEM strings are space-padded to full field width; they are matched with memcmp() */
static struct cppc_workaround_oem_info wa_info[] = {
        {
                .oem_id         = "HISI  ",
                .oem_table_id   = "HIP07   ",
                .oem_revision   = 0,
        }, {
                .oem_id         = "HISI  ",
                .oem_table_id   = "HIP08   ",
                .oem_revision   = 0,
        }
};

static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu,
                                             unsigned int perf);

/*
 * HISI platform does not support delivered performance counter and
 * reference performance counter. It can calculate the performance using the
 * platform specific mechanism. We reuse the desired performance register to
 * store the real performance calculated by the platform.
 */
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpunum)
{
        struct cppc_cpudata *cpudata = all_cpu_data[cpunum];
        u64 desired_perf;
        int ret;

        ret = cppc_get_desired_perf(cpunum, &desired_perf);
        if (ret < 0)
                return -EIO;

        return cppc_cpufreq_perf_to_khz(cpudata, desired_perf);
}

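/*
 * Match the PCCT's OEM fields against wa_info[] to decide whether this
 * platform needs the HiSilicon get_rate workaround.
 */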
static void cppc_check_hisi_workaround(void)
{
        struct acpi_table_header *tbl;
        acpi_status status = AE_OK;
        int i;

        status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
        if (ACPI_FAILURE(status) || !tbl)
                return;

        for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
                if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
                    !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
                    wa_info[i].oem_revision == tbl->oem_revision)
                        apply_hisi_workaround = true;
        }
}

/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
        const u8 *dmi_data = (const u8 *)dm;
        u16 *mhz = (u16 *)private;

        if (dm->type == DMI_ENTRY_PROCESSOR &&
            dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
                u16 val = (u16)get_unaligned((const u16 *)
                                (dmi_data + DMI_PROCESSOR_MAX_SPEED));
                *mhz = val > *mhz ? val : *mhz;
        }
}

/* Look up the max frequency in DMI */
static u64 cppc_get_dmi_max_khz(void)
{
        u16 mhz = 0;

        dmi_walk(cppc_find_dmi_mhz, &mhz);

        /*
         * Real stupid fallback value, just in case there is no
         * actual value set.
         */
        mhz = mhz ? mhz : 1;

        return (1000 * mhz);
}

/*
 * If CPPC lowest_freq and nominal_freq registers are exposed then we can
 * use them to convert perf to freq and vice versa.
 *
 * If the perf/freq point lies between Nominal and Lowest, we can treat
 * (Low perf, Low freq) and (Nom Perf, Nom freq) as 2D co-ordinates of a line
 * and extrapolate the rest.
 * For perf/freq > Nominal, we use the ratio perf:freq at Nominal for conversion.
 */
static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu,
                                             unsigned int perf)
{
        static u64 max_khz;
        struct cppc_perf_caps *caps = &cpu->perf_caps;
        u64 mul, div;

        if (caps->lowest_freq && caps->nominal_freq) {
                if (perf >= caps->nominal_perf) {
                        mul = caps->nominal_freq;
                        div = caps->nominal_perf;
                } else {
                        mul = caps->nominal_freq - caps->lowest_freq;
                        div = caps->nominal_perf - caps->lowest_perf;
                }
        } else {
                if (!max_khz)
                        max_khz = cppc_get_dmi_max_khz();
                mul = max_khz;
                div = caps->highest_perf;
        }
        return (u64)perf * mul / div;
}

static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu,
                                             unsigned int freq)
{
        static u64 max_khz;
        struct cppc_perf_caps *caps = &cpu->perf_caps;
        u64 mul, div;

        if (caps->lowest_freq && caps->nominal_freq) {
                if (freq >= caps->nominal_freq) {
                        mul = caps->nominal_perf;
                        div = caps->nominal_freq;
                } else {
                        mul = caps->lowest_perf;
                        div = caps->lowest_freq;
                }
        } else {
                if (!max_khz)
                        max_khz = cppc_get_dmi_max_khz();
                mul = caps->highest_perf;
                div = max_khz;
        }

        return (u64)freq * mul / div;
}

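/*
 * Convert the requested frequency to a CPPC performance level and hand it
 * to the platform, wrapping the request in the cpufreq transition notifiers.
 */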
static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
                                   unsigned int target_freq,
                                   unsigned int relation)
{
        struct cppc_cpudata *cpu;
        struct cpufreq_freqs freqs;
        u32 desired_perf;
        int ret = 0;

        cpu = all_cpu_data[policy->cpu];

        desired_perf = cppc_cpufreq_khz_to_perf(cpu, target_freq);
        /* Return if it is exactly the same perf */
        if (desired_perf == cpu->perf_ctrls.desired_perf)
                return ret;

        cpu->perf_ctrls.desired_perf = desired_perf;
        freqs.old = policy->cur;
        freqs.new = target_freq;

        cpufreq_freq_transition_begin(policy, &freqs);
        ret = cppc_set_perf(cpu->cpu, &cpu->perf_ctrls);
        cpufreq_freq_transition_end(policy, &freqs, ret != 0);

        if (ret)
                pr_debug("Failed to set target on CPU:%d. ret:%d\n",
                         cpu->cpu, ret);

        return ret;
}

static int cppc_verify_policy(struct cpufreq_policy *policy)
{
        cpufreq_verify_within_cpu_limits(policy);
        return 0;
}

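/* Called when a CPU goes offline: park it at its lowest performance level */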
static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
        int cpu_num = policy->cpu;
        struct cppc_cpudata *cpu = all_cpu_data[cpu_num];
        int ret;

        cpu->perf_ctrls.desired_perf = cpu->perf_caps.lowest_perf;

        ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls);
        if (ret)
                pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
                         cpu->perf_caps.lowest_perf, cpu_num, ret);
}

/*
 * The PCC subspace describes the rate at which the platform can accept
 * commands on the shared PCC channel (including READs which do not count
 * towards frequency transition requests), so ideally we need to use the
 * PCC values as a fallback if we don't have platform specific information
 * on the frequency transition delay.
 */
#ifdef CONFIG_ARM64
#include <asm/cputype.h>

static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu)
{
        unsigned long implementor = read_cpuid_implementor();
        unsigned long part_num = read_cpuid_part_number();
        unsigned int delay_us = 0;

        switch (implementor) {
        case ARM_CPU_IMP_QCOM:
                switch (part_num) {
                case QCOM_CPU_PART_FALKOR_V1:
                case QCOM_CPU_PART_FALKOR:
                        delay_us = 10000;
                        break;
                default:
                        delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
                        break;
                }
                break;
        default:
                delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
                break;
        }

        return delay_us;
}

#else

static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu)
{
        return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}
#endif

static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
        struct cppc_cpudata *cpu;
        unsigned int cpu_num = policy->cpu;
        int ret = 0;

        cpu = all_cpu_data[policy->cpu];

        cpu->cpu = cpu_num;
        ret = cppc_get_perf_caps(policy->cpu, &cpu->perf_caps);

        if (ret) {
                pr_debug("Err reading CPU%d perf capabilities. ret:%d\n",
                         cpu_num, ret);
                return ret;
        }

        /* Convert the lowest and nominal freq from MHz to KHz */
        cpu->perf_caps.lowest_freq *= 1000;
        cpu->perf_caps.nominal_freq *= 1000;

        /*
         * Set policy->min to lowest nonlinear perf to avoid any efficiency
         * penalty (see Section 8.4.7.1.1.5 of the ACPI 6.1 spec).
         */
        policy->min = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_nonlinear_perf);
        policy->max = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf);

        /*
         * Set cpuinfo.min_freq to Lowest to make the full range of performance
         * available if userspace wants to use any perf between lowest & lowest
         * nonlinear perf.
         */
        policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_perf);
        policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf);

        policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu_num);
        policy->shared_type = cpu->shared_type;

        if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
                int i;

                cpumask_copy(policy->cpus, cpu->shared_cpu_map);

                for_each_cpu(i, policy->cpus) {
                        if (unlikely(i == policy->cpu))
                                continue;

                        memcpy(&all_cpu_data[i]->perf_caps, &cpu->perf_caps,
                               sizeof(cpu->perf_caps));
                }
        } else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
                /* Support only SW_ANY for now. */
                pr_debug("Unsupported CPU co-ord type\n");
                return -EFAULT;
        }

        cpu->cur_policy = policy;

        /* Set policy->cur to max now. The governors will adjust later. */
        policy->cur = cppc_cpufreq_perf_to_khz(cpu,
                                               cpu->perf_caps.highest_perf);
        cpu->perf_ctrls.desired_perf = cpu->perf_caps.highest_perf;

        ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls);
        if (ret)
                pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
                         cpu->perf_caps.highest_perf, cpu_num, ret);

        return ret;
}

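/*
 * Feedback counters may be only 32 bits wide. If t0 fits in 32 bits and t1
 * is not ahead of t0, assume the counter wrapped and take the difference
 * modulo 2^32; otherwise a plain 64-bit subtraction is safe.
 */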
static inline u64 get_delta(u64 t1, u64 t0)
{
        if (t1 > t0 || t0 > ~(u32)0)
                return t1 - t0;

        return (u32)t1 - (u32)t0;
}

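/*
 * Compute the delivered frequency from two feedback counter snapshots:
 * delivered_perf = reference_perf * delta(delivered) / delta(reference).
 */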
static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu,
                                     struct cppc_perf_fb_ctrs fb_ctrs_t0,
                                     struct cppc_perf_fb_ctrs fb_ctrs_t1)
{
        u64 delta_reference, delta_delivered;
        u64 reference_perf, delivered_perf;

        reference_perf = fb_ctrs_t0.reference_perf;

        delta_reference = get_delta(fb_ctrs_t1.reference,
                                    fb_ctrs_t0.reference);
        delta_delivered = get_delta(fb_ctrs_t1.delivered,
                                    fb_ctrs_t0.delivered);

        /* Check to avoid divide-by zero and invalid delivered_perf */
        if (delta_reference && delta_delivered)
                delivered_perf = (reference_perf * delta_delivered) /
                                 delta_reference;
        else
                delivered_perf = cpu->perf_ctrls.desired_perf;

        return cppc_cpufreq_perf_to_khz(cpu, delivered_perf);
}

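/*
 * Sample the feedback counters twice, 2us apart, and report the rate
 * delivered over that window; HiSilicon platforms lack these counters
 * and use the workaround path instead.
 */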
static unsigned int cppc_cpufreq_get_rate(unsigned int cpunum)
{
        struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
        struct cppc_cpudata *cpu = all_cpu_data[cpunum];
        int ret;

        if (apply_hisi_workaround)
                return hisi_cppc_cpufreq_get_rate(cpunum);

        ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t0);
        if (ret)
                return ret;

        udelay(2); /* 2usec delay between sampling */

        ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t1);
        if (ret)
                return ret;

        return cppc_get_rate_from_fbctrs(cpu, fb_ctrs_t0, fb_ctrs_t1);
}

static struct cpufreq_driver cppc_cpufreq_driver = {
        .flags = CPUFREQ_CONST_LOOPS,
        .verify = cppc_verify_policy,
        .target = cppc_cpufreq_set_target,
        .get = cppc_cpufreq_get_rate,
        .init = cppc_cpufreq_cpu_init,
        .stop_cpu = cppc_cpufreq_stop_cpu,
        .name = "cppc_cpufreq",
};

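/*
 * Allocate per-CPU data, build the PSD coordination map from ACPI and
 * register the cpufreq driver, unwinding all allocations on failure.
 */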
static int __init cppc_cpufreq_init(void)
{
        int i, ret = 0;
        struct cppc_cpudata *cpu;

        if (acpi_disabled)
                return -ENODEV;

        all_cpu_data = kcalloc(num_possible_cpus(), sizeof(void *),
                               GFP_KERNEL);
        if (!all_cpu_data)
                return -ENOMEM;

        for_each_possible_cpu(i) {
                all_cpu_data[i] = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
                if (!all_cpu_data[i])
                        goto out;

                cpu = all_cpu_data[i];
                if (!zalloc_cpumask_var(&cpu->shared_cpu_map, GFP_KERNEL))
                        goto out;
        }

        ret = acpi_get_psd_map(all_cpu_data);
        if (ret) {
                pr_debug("Error parsing PSD data. Aborting cpufreq registration.\n");
                goto out;
        }

        cppc_check_hisi_workaround();

        ret = cpufreq_register_driver(&cppc_cpufreq_driver);
        if (ret)
                goto out;

        return ret;

out:
        for_each_possible_cpu(i) {
                cpu = all_cpu_data[i];
                if (!cpu)
                        break;
                free_cpumask_var(cpu->shared_cpu_map);
                kfree(cpu);
        }

        kfree(all_cpu_data);
        return -ENODEV;
}

static void __exit cppc_cpufreq_exit(void)
{
        struct cppc_cpudata *cpu;
        int i;

        cpufreq_unregister_driver(&cppc_cpufreq_driver);

        for_each_possible_cpu(i) {
                cpu = all_cpu_data[i];
                free_cpumask_var(cpu->shared_cpu_map);
                kfree(cpu);
        }

        kfree(all_cpu_data);
}

module_exit(cppc_cpufreq_exit);
MODULE_AUTHOR("Ashwin Chaugule");
MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
MODULE_LICENSE("GPL");

late_initcall(cppc_cpufreq_init);

static const struct acpi_device_id cppc_acpi_ids[] __used = {
        {ACPI_PROCESSOR_DEVICE_HID, },
        {}
};

MODULE_DEVICE_TABLE(acpi, cppc_acpi_ids);