// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) driver for
 * interfacing with the CPU's CPPC implementation per the ACPI v5.0+ spec.
 */

#define pr_fmt(fmt)	"CPPC Cpufreq:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/time.h>
#include <linux/vmalloc.h>

#include <asm/unaligned.h>

#include <acpi/cppc_acpi.h>

/* Minimum length of the DMI processor entry that carries the fields we read */
#define DMI_ENTRY_PROCESSOR_MIN_LENGTH	48

/* Offset of the max speed field within the DMI processor entry */
#define DMI_PROCESSOR_MAX_SPEED		0x14

/*
 * Per-CPU data parsed from the ACPI _CPC structures: for each CPU, the
 * highest/lowest supported performance capabilities, the desired
 * performance level requested, etc.
 */
static struct cppc_cpudata **all_cpu_data;

struct cppc_workaround_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE + 1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};

static bool apply_hisi_workaround;

/*
 * The ID strings are space-padded out to ACPI_OEM_ID_SIZE and
 * ACPI_OEM_TABLE_ID_SIZE, since they are matched with memcmp() over
 * those full widths.
 */
static struct cppc_workaround_oem_info wa_info[] = {
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP07   ",
		.oem_revision	= 0,
	}, {
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP08   ",
		.oem_revision	= 0,
	}
};

static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu,
					     unsigned int perf);

/*
 * The HiSilicon platform does not support the delivered or reference
 * performance counters. It calculates delivered performance through a
 * platform-specific mechanism and reuses the desired performance
 * register to report it, so read that value back as the current rate.
 */
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpunum)
{
	struct cppc_cpudata *cpudata = all_cpu_data[cpunum];
	u64 desired_perf;
	int ret;

	ret = cppc_get_desired_perf(cpunum, &desired_perf);
	if (ret < 0)
		return 0;	/* 0 = rate unknown; don't return an errno as an unsigned rate */

	return cppc_cpufreq_perf_to_khz(cpudata, desired_perf);
}

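/*
 * Enable the workaround only when the PCCT's OEM ID, table ID and
 * revision match one of the affected platforms listed in wa_info[].
 */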
static void cppc_check_hisi_workaround(void)
{
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	int i;

	status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
	if (ACPI_FAILURE(status) || !tbl)
		return;

	for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
		if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
		    !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
		    wa_info[i].oem_revision == tbl->oem_revision) {
			apply_hisi_workaround = true;
			break;
		}
	}

	acpi_put_table(tbl);
}

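/* dmi_walk() callback: track the highest "Max Speed" (MHz) of any DMI processor entry */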
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
	const u8 *dmi_data = (const u8 *)dm;
	u16 *mhz = (u16 *)private;

	if (dm->type == DMI_ENTRY_PROCESSOR &&
	    dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
		u16 val = (u16)get_unaligned((const u16 *)
				(dmi_data + DMI_PROCESSOR_MAX_SPEED));
		*mhz = val > *mhz ? val : *mhz;
	}
}

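/* Walk DMI and return the max CPU frequency, converted to kHz */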
static u64 cppc_get_dmi_max_khz(void)
{
	u16 mhz = 0;

	dmi_walk(cppc_find_dmi_mhz, &mhz);

	/*
	 * Real platforms should never report zero here, but guard
	 * against a possible division by zero in the callers.
	 */
	mhz = mhz ? mhz : 1;

	return (1000 * mhz);
}

/*
 * If the CPPC lowest_freq and nominal_freq registers are exposed, use them
 * to convert perf to freq and vice versa. For a request at or above the
 * nominal point, scale by the perf:freq ratio at nominal; below it, scale
 * by the slope of the line through (lowest_perf, lowest_freq) and
 * (nominal_perf, nominal_freq). If the registers are not exposed, fall
 * back to the ratio of the DMI max frequency to highest_perf.
 */
static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu,
					     unsigned int perf)
{
	static u64 max_khz;
	struct cppc_perf_caps *caps = &cpu->perf_caps;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		if (perf >= caps->nominal_perf) {
			mul = caps->nominal_freq;
			div = caps->nominal_perf;
		} else {
			mul = caps->nominal_freq - caps->lowest_freq;
			div = caps->nominal_perf - caps->lowest_perf;
		}
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = max_khz;
		div = caps->highest_perf;
	}
	return (u64)perf * mul / div;
}

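/* Inverse mapping of cppc_cpufreq_perf_to_khz(): convert kHz to a perf value */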
static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu,
					     unsigned int freq)
{
	static u64 max_khz;
	struct cppc_perf_caps *caps = &cpu->perf_caps;
	u64 mul, div;

	if (caps->lowest_freq && caps->nominal_freq) {
		if (freq >= caps->nominal_freq) {
			mul = caps->nominal_perf;
			div = caps->nominal_freq;
		} else {
			mul = caps->lowest_perf;
			div = caps->lowest_freq;
		}
	} else {
		if (!max_khz)
			max_khz = cppc_get_dmi_max_khz();
		mul = caps->highest_perf;
		div = max_khz;
	}

	return (u64)freq * mul / div;
}

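/*
 * Convert the target frequency to a CPPC perf value and hand it to the
 * platform, bracketed by the cpufreq transition notifications.
 */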
static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
				   unsigned int target_freq,
				   unsigned int relation)
{
	struct cppc_cpudata *cpu;
	struct cpufreq_freqs freqs;
	u32 desired_perf;
	int ret = 0;

	cpu = all_cpu_data[policy->cpu];

	desired_perf = cppc_cpufreq_khz_to_perf(cpu, target_freq);

	/* Return early if the requested perf is already in effect */
	if (desired_perf == cpu->perf_ctrls.desired_perf)
		return ret;

	cpu->perf_ctrls.desired_perf = desired_perf;
	freqs.old = policy->cur;
	freqs.new = target_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	ret = cppc_set_perf(cpu->cpu, &cpu->perf_ctrls);
	cpufreq_freq_transition_end(policy, &freqs, ret != 0);

	if (ret)
		pr_debug("Failed to set target on CPU:%d. ret:%d\n",
			 cpu->cpu, ret);

	return ret;
}

static int cppc_verify_policy(struct cpufreq_policy_data *policy)
{
	cpufreq_verify_within_cpu_limits(policy);
	return 0;
}

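/* Drop the CPU to its lowest performance level before it goes offline */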
static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
	int cpu_num = policy->cpu;
	struct cppc_cpudata *cpu = all_cpu_data[cpu_num];
	int ret;

	cpu->perf_ctrls.desired_perf = cpu->perf_caps.lowest_perf;

	ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls);
	if (ret)
		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
			 cpu->perf_caps.lowest_perf, cpu_num, ret);
}

/*
 * The PCC subspace describes the rate at which the platform can accept
 * commands on the shared PCC channel (including READs, which do not count
 * as frequency transition requests), so ideally the transition delay is
 * derived from the PCC values, unless a platform needs a specific override.
 */
#ifdef CONFIG_ARM64
#include <asm/cputype.h>

static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_num = read_cpuid_part_number();
	unsigned int delay_us = 0;

	switch (implementor) {
	case ARM_CPU_IMP_QCOM:
		switch (part_num) {
		case QCOM_CPU_PART_FALKOR_V1:
		case QCOM_CPU_PART_FALKOR:
			delay_us = 10000;	/* 10 ms */
			break;
		default:
			delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
			break;
		}
		break;
	default:
		delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
		break;
	}

	return delay_us;
}

#else

static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu)
{
	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}
#endif

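/* Set up a CPU's cpufreq policy from its CPPC performance capabilities */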
static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	struct cppc_cpudata *cpu;
	unsigned int cpu_num = policy->cpu;
	int ret = 0;

	cpu = all_cpu_data[policy->cpu];

	cpu->cpu = cpu_num;
	ret = cppc_get_perf_caps(policy->cpu, &cpu->perf_caps);

	if (ret) {
		pr_debug("Err reading CPU%d perf capabilities. ret:%d\n",
			 cpu_num, ret);
		return ret;
	}

	/* Convert the lowest and nominal freq from MHz to kHz */
	cpu->perf_caps.lowest_freq *= 1000;
	cpu->perf_caps.nominal_freq *= 1000;

	/*
	 * Set policy->min to lowest nonlinear perf to avoid operating in the
	 * inefficient region below it.
	 */
	policy->min = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_nonlinear_perf);
	policy->max = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf);

	/*
	 * Set cpuinfo.min_freq to lowest perf so the full range of performance
	 * is available if userspace wants to use any perf between lowest and
	 * lowest nonlinear perf.
	 */
	policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_perf);
	policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf);

	policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu_num);
	policy->shared_type = cpu->shared_type;

	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		int i;

		cpumask_copy(policy->cpus, cpu->shared_cpu_map);

		for_each_cpu(i, policy->cpus) {
			if (unlikely(i == policy->cpu))
				continue;

			memcpy(&all_cpu_data[i]->perf_caps, &cpu->perf_caps,
			       sizeof(cpu->perf_caps));
		}
	} else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
		/* Support only SW_ANY for now */
		pr_debug("Unsupported CPU co-ord type\n");
		return -EFAULT;
	}

	cpu->cur_policy = policy;

	/* Set policy->cur to max now. The governors will adjust later. */
	policy->cur = cppc_cpufreq_perf_to_khz(cpu,
					       cpu->perf_caps.highest_perf);
	cpu->perf_ctrls.desired_perf = cpu->perf_caps.highest_perf;

	ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls);
	if (ret)
		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
			 cpu->perf_caps.highest_perf, cpu_num, ret);

	return ret;
}

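/*
 * Counter delta helper: if t1 did not simply advance past t0 and t0 still
 * fits in 32 bits, assume a 32-bit counter wrapped and compute the delta
 * in 32-bit arithmetic.
 */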
static inline u64 get_delta(u64 t1, u64 t0)
{
	if (t1 > t0 || t0 > ~(u32)0)
		return t1 - t0;

	return (u32)t1 - (u32)t0;
}

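/*
 * Estimate the delivered performance from two feedback-counter snapshots:
 * delivered_perf = reference_perf * delta_delivered / delta_reference.
 */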
static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu,
				     struct cppc_perf_fb_ctrs fb_ctrs_t0,
				     struct cppc_perf_fb_ctrs fb_ctrs_t1)
{
	u64 delta_reference, delta_delivered;
	u64 reference_perf, delivered_perf;

	reference_perf = fb_ctrs_t0.reference_perf;

	delta_reference = get_delta(fb_ctrs_t1.reference,
				    fb_ctrs_t0.reference);
	delta_delivered = get_delta(fb_ctrs_t1.delivered,
				    fb_ctrs_t0.delivered);

	/* Avoid divide-by-zero: both deltas must be non-zero for a valid ratio */
	if (delta_reference && delta_delivered)
		delivered_perf = (reference_perf * delta_delivered) /
					delta_reference;
	else
		delivered_perf = cpu->perf_ctrls.desired_perf;

	return cppc_cpufreq_perf_to_khz(cpu, delivered_perf);
}

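/*
 * .get callback: sample the feedback counters twice, 2 usec apart, and
 * derive the current rate from the counter deltas.
 */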
static unsigned int cppc_cpufreq_get_rate(unsigned int cpunum)
{
	struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
	struct cppc_cpudata *cpu = all_cpu_data[cpunum];
	int ret;

	if (apply_hisi_workaround)
		return hisi_cppc_cpufreq_get_rate(cpunum);

	ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t0);
	if (ret)
		return 0;	/* 0 = rate unknown; don't return an errno as an unsigned rate */

	udelay(2);	/* 2 usec delay between sampling */

	ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t1);
	if (ret)
		return 0;

	return cppc_get_rate_from_fbctrs(cpu, fb_ctrs_t0, fb_ctrs_t1);
}

static struct cpufreq_driver cppc_cpufreq_driver = {
	.flags = CPUFREQ_CONST_LOOPS,
	.verify = cppc_verify_policy,
	.target = cppc_cpufreq_set_target,
	.get = cppc_cpufreq_get_rate,
	.init = cppc_cpufreq_cpu_init,
	.stop_cpu = cppc_cpufreq_stop_cpu,
	.name = "cppc_cpufreq",
};

static int __init cppc_cpufreq_init(void)
{
	int i, ret = 0;
	struct cppc_cpudata *cpu;

	if (acpi_disabled)
		return -ENODEV;

	all_cpu_data = kcalloc(num_possible_cpus(), sizeof(void *),
			       GFP_KERNEL);
	if (!all_cpu_data)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		all_cpu_data[i] = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
		if (!all_cpu_data[i])
			goto out;

		cpu = all_cpu_data[i];
		if (!zalloc_cpumask_var(&cpu->shared_cpu_map, GFP_KERNEL))
			goto out;
	}

	ret = acpi_get_psd_map(all_cpu_data);
	if (ret) {
		pr_debug("Error parsing PSD data. Aborting cpufreq registration.\n");
		goto out;
	}

	cppc_check_hisi_workaround();

	ret = cpufreq_register_driver(&cppc_cpufreq_driver);
	if (ret)
		goto out;

	return ret;

out:
	for_each_possible_cpu(i) {
		cpu = all_cpu_data[i];
		if (!cpu)
			break;
		free_cpumask_var(cpu->shared_cpu_map);
		kfree(cpu);
	}

	kfree(all_cpu_data);
	return -ENODEV;
}

static void __exit cppc_cpufreq_exit(void)
{
	struct cppc_cpudata *cpu;
	int i;

	cpufreq_unregister_driver(&cppc_cpufreq_driver);

	for_each_possible_cpu(i) {
		cpu = all_cpu_data[i];
		free_cpumask_var(cpu->shared_cpu_map);
		kfree(cpu);
	}

	kfree(all_cpu_data);
}

module_exit(cppc_cpufreq_exit);
MODULE_AUTHOR("Ashwin Chaugule");
MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
MODULE_LICENSE("GPL");

late_initcall(cppc_cpufreq_init);

static const struct acpi_device_id cppc_acpi_ids[] __used = {
	{ACPI_PROCESSOR_DEVICE_HID, },
	{}
};

MODULE_DEVICE_TABLE(acpi, cppc_acpi_ids);