/*
 * MediaTek MT8173 cpufreq driver.
 *
 * Scales CPU frequency and voltage (DVFS) for the CPU clusters of the
 * MT8173 SoC, using OPP tables described in the device tree.
 */

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/thermal.h>

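/* Voltage constraints and tolerance, in microvolts (regulator API units). */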
#define MIN_VOLT_SHIFT		(100000)
#define MAX_VOLT_SHIFT		(200000)
#define MAX_VOLT_LIMIT		(1150000)
#define VOLT_TOL		(10000)

/*
 * The struct mtk_cpu_dvfs_info holds necessary information for doing CPU DVFS
 * on each CPU power/clock domain of Mediatek SoCs. Each CPU cluster in
 * Mediatek SoCs has two voltage inputs, Vproc and Vsram. In some cases the two
 * voltage inputs need to be controlled under a hardware limitation:
 * 100mV < Vsram - Vproc < 200mV
 *
 * When scaling the clock frequency of a CPU clock domain, the clock source
 * needs to be switched to another stable PLL clock temporarily until
 * the original PLL becomes stable at target frequency.
 */
struct mtk_cpu_dvfs_info {
	struct cpumask cpus;
	struct device *cpu_dev;
	struct regulator *proc_reg;
	struct regulator *sram_reg;
	struct clk *cpu_clk;
	struct clk *inter_clk;
	struct thermal_cooling_device *cdev;
	struct list_head list_head;
	int intermediate_voltage;
	bool need_voltage_tracking;
};

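/* One entry per CPU power/clock domain, looked up by CPU number. */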
static LIST_HEAD(dvfs_info_list);

static struct mtk_cpu_dvfs_info *mtk_cpu_dvfs_info_lookup(int cpu)
{
	struct mtk_cpu_dvfs_info *info;
	struct list_head *list;

	list_for_each(list, &dvfs_info_list) {
		info = list_entry(list, struct mtk_cpu_dvfs_info, list_head);

		if (cpumask_test_cpu(cpu, &info->cpus))
			return info;
	}

	return NULL;
}

static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info,
					int new_vproc)
{
	struct regulator *proc_reg = info->proc_reg;
	struct regulator *sram_reg = info->sram_reg;
	int old_vproc, old_vsram, new_vsram, vsram, vproc, ret;

	old_vproc = regulator_get_voltage(proc_reg);
	if (old_vproc < 0) {
		pr_err("%s: invalid Vproc value: %d\n", __func__, old_vproc);
		return old_vproc;
	}

	new_vsram = min(new_vproc + MIN_VOLT_SHIFT, MAX_VOLT_LIMIT);

	if (old_vproc < new_vproc) {
		/*
		 * When scaling up voltages, Vsram and Vproc scale up step
		 * by step. In each step, set Vsram to (Vproc + 200mV) first,
		 * then set Vproc to (Vsram - 100mV).
		 * Keep doing it until Vsram and Vproc hit target voltages.
		 */
		do {
			old_vsram = regulator_get_voltage(sram_reg);
			if (old_vsram < 0) {
				pr_err("%s: invalid Vsram value: %d\n",
				       __func__, old_vsram);
				return old_vsram;
			}
			old_vproc = regulator_get_voltage(proc_reg);
			if (old_vproc < 0) {
				pr_err("%s: invalid Vproc value: %d\n",
				       __func__, old_vproc);
				return old_vproc;
			}

			vsram = min(new_vsram, old_vproc + MAX_VOLT_SHIFT);

			if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) {
				vsram = MAX_VOLT_LIMIT;

				/*
				 * If the target Vsram hits the limit,
				 * try to set the exact voltage value first.
				 */
				ret = regulator_set_voltage(sram_reg, vsram,
							    vsram);
				if (ret)
					ret = regulator_set_voltage(sram_reg,
							vsram - VOLT_TOL,
							vsram);

				vproc = new_vproc;
			} else {
				ret = regulator_set_voltage(sram_reg, vsram,
							    vsram + VOLT_TOL);

				vproc = vsram - MIN_VOLT_SHIFT;
			}
			if (ret)
				return ret;

			ret = regulator_set_voltage(proc_reg, vproc,
						    vproc + VOLT_TOL);
			if (ret) {
				regulator_set_voltage(sram_reg, old_vsram,
						      old_vsram);
				return ret;
			}
		} while (vproc < new_vproc || vsram < new_vsram);
	} else if (old_vproc > new_vproc) {
		/*
		 * When scaling down voltages, Vsram and Vproc scale down step
		 * by step. In each step, set Vproc to (Vsram - 200mV) first,
		 * then set Vsram to (Vproc + 100mV).
		 * Keep doing it until Vsram and Vproc hit target voltages.
		 */
		do {
			old_vproc = regulator_get_voltage(proc_reg);
			if (old_vproc < 0) {
				pr_err("%s: invalid Vproc value: %d\n",
				       __func__, old_vproc);
				return old_vproc;
			}
			old_vsram = regulator_get_voltage(sram_reg);
			if (old_vsram < 0) {
				pr_err("%s: invalid Vsram value: %d\n",
				       __func__, old_vsram);
				return old_vsram;
			}

			vproc = max(new_vproc, old_vsram - MAX_VOLT_SHIFT);
			ret = regulator_set_voltage(proc_reg, vproc,
						    vproc + VOLT_TOL);
			if (ret)
				return ret;

			if (vproc == new_vproc)
				vsram = new_vsram;
			else
				vsram = max(new_vsram, vproc + MIN_VOLT_SHIFT);

			if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) {
				vsram = MAX_VOLT_LIMIT;

				/*
				 * If the target Vsram hits the limit,
				 * try to set the exact voltage value first.
				 */
				ret = regulator_set_voltage(sram_reg, vsram,
							    vsram);
				if (ret)
					ret = regulator_set_voltage(sram_reg,
							vsram - VOLT_TOL,
							vsram);
			} else {
				ret = regulator_set_voltage(sram_reg, vsram,
							    vsram + VOLT_TOL);
			}

			if (ret) {
				regulator_set_voltage(proc_reg, old_vproc,
						      old_vproc);
				return ret;
			}
		} while (vproc > new_vproc + VOLT_TOL ||
			 vsram > new_vsram + VOLT_TOL);
	}

	return 0;
}

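/*
 * If an SRAM regulator is present, use software "voltage tracking" to keep
 * Vsram within the allowed window of Vproc; otherwise just program Vproc.
 */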
static int mtk_cpufreq_set_voltage(struct mtk_cpu_dvfs_info *info, int vproc)
{
	if (info->need_voltage_tracking)
		return mtk_cpufreq_voltage_tracking(info, vproc);
	else
		return regulator_set_voltage(info->proc_reg, vproc,
					     vproc + VOLT_TOL);
}

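/*
 * Frequency switch sequence: raise the voltage high enough for both the
 * intermediate and the target frequency, reparent the CPU clock to the
 * stable intermediate clock, retune the original PLL to the target rate,
 * switch back to the PLL, then drop the voltage to the target OPP level.
 */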
static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
				  unsigned int index)
{
	struct cpufreq_frequency_table *freq_table = policy->freq_table;
	struct clk *cpu_clk = policy->clk;
	struct clk *armpll = clk_get_parent(cpu_clk);
	struct mtk_cpu_dvfs_info *info = policy->driver_data;
	struct device *cpu_dev = info->cpu_dev;
	struct dev_pm_opp *opp;
	long freq_hz, old_freq_hz;
	int vproc, old_vproc, inter_vproc, target_vproc, ret;

	inter_vproc = info->intermediate_voltage;

	old_freq_hz = clk_get_rate(cpu_clk);
	old_vproc = regulator_get_voltage(info->proc_reg);
	if (old_vproc < 0) {
		pr_err("%s: invalid Vproc value: %d\n", __func__, old_vproc);
		return old_vproc;
	}

	freq_hz = freq_table[index].frequency * 1000;

	rcu_read_lock();
	opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		pr_err("cpu%d: failed to find OPP for %ld\n",
		       policy->cpu, freq_hz);
		return PTR_ERR(opp);
	}
	vproc = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();

	/*
	 * If the new voltage or the intermediate voltage is higher than the
	 * current voltage, scale up voltage first.
	 */
	target_vproc = (inter_vproc > vproc) ? inter_vproc : vproc;
	if (old_vproc < target_vproc) {
		ret = mtk_cpufreq_set_voltage(info, target_vproc);
		if (ret) {
			pr_err("cpu%d: failed to scale up voltage!\n",
			       policy->cpu);
			mtk_cpufreq_set_voltage(info, old_vproc);
			return ret;
		}
	}

	/* Reparent the CPU clock to the intermediate clock. */
	ret = clk_set_parent(cpu_clk, info->inter_clk);
	if (ret) {
		pr_err("cpu%d: failed to re-parent cpu clock!\n",
		       policy->cpu);
		mtk_cpufreq_set_voltage(info, old_vproc);
		WARN_ON(1);
		return ret;
	}

	/* Set the original PLL to the target rate. */
	ret = clk_set_rate(armpll, freq_hz);
	if (ret) {
		pr_err("cpu%d: failed to scale cpu clock rate!\n",
		       policy->cpu);
		clk_set_parent(cpu_clk, armpll);
		mtk_cpufreq_set_voltage(info, old_vproc);
		return ret;
	}

	/* Set the parent of the CPU clock back to the original PLL. */
	ret = clk_set_parent(cpu_clk, armpll);
	if (ret) {
		pr_err("cpu%d: failed to re-parent cpu clock!\n",
		       policy->cpu);
		mtk_cpufreq_set_voltage(info, inter_vproc);
		WARN_ON(1);
		return ret;
	}

	/*
	 * If the new voltage is lower than the intermediate voltage or the
	 * original voltage, scale down to the new voltage.
	 */
	if (vproc < inter_vproc || vproc < old_vproc) {
		ret = mtk_cpufreq_set_voltage(info, vproc);
		if (ret) {
			pr_err("cpu%d: failed to scale down voltage!\n",
			       policy->cpu);
			clk_set_parent(cpu_clk, info->inter_clk);
			clk_set_rate(armpll, old_freq_hz);
			clk_set_parent(cpu_clk, armpll);
			return ret;
		}
	}

	return 0;
}

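/*
 * Once the policy is up, register its CPUs as a cooling device so the
 * thermal framework can throttle them; skipped if the CPU node has no
 * "#cooling-cells" property.
 */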
static void mtk_cpufreq_ready(struct cpufreq_policy *policy)
{
	struct mtk_cpu_dvfs_info *info = policy->driver_data;
	struct device_node *np = of_node_get(info->cpu_dev->of_node);

	if (WARN_ON(!np))
		return;

	if (of_find_property(np, "#cooling-cells", NULL)) {
		info->cdev = of_cpufreq_cooling_register(np,
							 policy->related_cpus);

		if (IS_ERR(info->cdev)) {
			dev_err(info->cpu_dev,
				"running cpufreq without cooling device: %ld\n",
				PTR_ERR(info->cdev));

			info->cdev = NULL;
		}
	}

	of_node_put(np);
}

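/*
 * Gather the clocks, regulators and OPP table for the power/clock domain
 * that contains @cpu. Returns -EPROBE_DEFER when a resource is not ready
 * yet, so the probe can be retried.
 */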
static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
{
	struct device *cpu_dev;
	struct regulator *proc_reg = ERR_PTR(-ENODEV);
	struct regulator *sram_reg = ERR_PTR(-ENODEV);
	struct clk *cpu_clk = ERR_PTR(-ENODEV);
	struct clk *inter_clk = ERR_PTR(-ENODEV);
	struct dev_pm_opp *opp;
	unsigned long rate;
	int ret;

	cpu_dev = get_cpu_device(cpu);
	if (!cpu_dev) {
		pr_err("failed to get cpu%d device\n", cpu);
		return -ENODEV;
	}

	cpu_clk = clk_get(cpu_dev, "cpu");
	if (IS_ERR(cpu_clk)) {
		if (PTR_ERR(cpu_clk) == -EPROBE_DEFER)
			pr_warn("cpu clk for cpu%d not ready, retry.\n", cpu);
		else
			pr_err("failed to get cpu clk for cpu%d\n", cpu);

		ret = PTR_ERR(cpu_clk);
		return ret;
	}

	inter_clk = clk_get(cpu_dev, "intermediate");
	if (IS_ERR(inter_clk)) {
		if (PTR_ERR(inter_clk) == -EPROBE_DEFER)
			pr_warn("intermediate clk for cpu%d not ready, retry.\n",
				cpu);
		else
			pr_err("failed to get intermediate clk for cpu%d\n",
			       cpu);

		ret = PTR_ERR(inter_clk);
		goto out_free_resources;
	}

	proc_reg = regulator_get_exclusive(cpu_dev, "proc");
	if (IS_ERR(proc_reg)) {
		if (PTR_ERR(proc_reg) == -EPROBE_DEFER)
			pr_warn("proc regulator for cpu%d not ready, retry.\n",
				cpu);
		else
			pr_err("failed to get proc regulator for cpu%d\n",
			       cpu);

		ret = PTR_ERR(proc_reg);
		goto out_free_resources;
	}

	/* Both presence and absence of the sram regulator are valid cases. */
	sram_reg = regulator_get_exclusive(cpu_dev, "sram");

	/* Get OPP-sharing information from "operating-points-v2" bindings. */
	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, &info->cpus);
	if (ret) {
		pr_err("failed to get OPP-sharing information for cpu%d\n",
		       cpu);
		goto out_free_resources;
	}

	ret = dev_pm_opp_of_cpumask_add_table(&info->cpus);
	if (ret) {
		pr_warn("no OPP table for cpu%d\n", cpu);
		goto out_free_resources;
	}

	/* Search a safe voltage for the intermediate frequency. */
	rate = clk_get_rate(inter_clk);
	rcu_read_lock();
	opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		pr_err("failed to get intermediate opp for cpu%d\n", cpu);
		ret = PTR_ERR(opp);
		goto out_free_opp_table;
	}
	info->intermediate_voltage = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();

	info->cpu_dev = cpu_dev;
	info->proc_reg = proc_reg;
	info->sram_reg = IS_ERR(sram_reg) ? NULL : sram_reg;
	info->cpu_clk = cpu_clk;
	info->inter_clk = inter_clk;

	/*
	 * If the SRAM regulator is present, software "voltage tracking" is
	 * needed for this CPU power domain.
	 */
	info->need_voltage_tracking = !IS_ERR(sram_reg);

	return 0;

out_free_opp_table:
	dev_pm_opp_of_cpumask_remove_table(&info->cpus);

out_free_resources:
	if (!IS_ERR(proc_reg))
		regulator_put(proc_reg);
	if (!IS_ERR(sram_reg))
		regulator_put(sram_reg);
	if (!IS_ERR(cpu_clk))
		clk_put(cpu_clk);
	if (!IS_ERR(inter_clk))
		clk_put(inter_clk);

	return ret;
}

static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info)
{
	if (!IS_ERR(info->proc_reg))
		regulator_put(info->proc_reg);
	if (!IS_ERR(info->sram_reg))
		regulator_put(info->sram_reg);
	if (!IS_ERR(info->cpu_clk))
		clk_put(info->cpu_clk);
	if (!IS_ERR(info->inter_clk))
		clk_put(info->inter_clk);

	dev_pm_opp_of_cpumask_remove_table(&info->cpus);
}

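/*
 * Per-policy setup: build the cpufreq table from the OPP entries and share
 * one policy across all CPUs in the same power/clock domain.
 */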
static int mtk_cpufreq_init(struct cpufreq_policy *policy)
{
	struct mtk_cpu_dvfs_info *info;
	struct cpufreq_frequency_table *freq_table;
	int ret;

	info = mtk_cpu_dvfs_info_lookup(policy->cpu);
	if (!info) {
		pr_err("dvfs info for cpu%d is not initialized.\n",
		       policy->cpu);
		return -EINVAL;
	}

	ret = dev_pm_opp_init_cpufreq_table(info->cpu_dev, &freq_table);
	if (ret) {
		pr_err("failed to init cpufreq table for cpu%d: %d\n",
		       policy->cpu, ret);
		return ret;
	}

	ret = cpufreq_table_validate_and_show(policy, freq_table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		goto out_free_cpufreq_table;
	}

	cpumask_copy(policy->cpus, &info->cpus);
	policy->driver_data = info;
	policy->clk = info->cpu_clk;

	return 0;

out_free_cpufreq_table:
	dev_pm_opp_free_cpufreq_table(info->cpu_dev, &freq_table);
	return ret;
}

static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct mtk_cpu_dvfs_info *info = policy->driver_data;

	cpufreq_cooling_unregister(info->cdev);
	dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table);

	return 0;
}

static struct cpufreq_driver mt8173_cpufreq_driver = {
	.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
		 CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
	.verify = cpufreq_generic_frequency_table_verify,
	.target_index = mtk_cpufreq_set_target,
	.get = cpufreq_generic_get,
	.init = mtk_cpufreq_init,
	.exit = mtk_cpufreq_exit,
	.ready = mtk_cpufreq_ready,
	.name = "mtk-cpufreq",
	.attr = cpufreq_generic_attr,
};

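/*
 * Probe allocates and initializes one mtk_cpu_dvfs_info per power/clock
 * domain before registering the cpufreq driver; on any failure everything
 * collected so far is released again.
 */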
static int mt8173_cpufreq_probe(struct platform_device *pdev)
{
	struct mtk_cpu_dvfs_info *info;
	struct list_head *list, *tmp;
	int cpu, ret;

	for_each_possible_cpu(cpu) {
		info = mtk_cpu_dvfs_info_lookup(cpu);
		if (info)
			continue;

		info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
		if (!info) {
			ret = -ENOMEM;
			goto release_dvfs_info_list;
		}

		ret = mtk_cpu_dvfs_info_init(info, cpu);
		if (ret) {
			dev_err(&pdev->dev,
				"failed to initialize dvfs info for cpu%d\n",
				cpu);
			goto release_dvfs_info_list;
		}

		list_add(&info->list_head, &dvfs_info_list);
	}

	ret = cpufreq_register_driver(&mt8173_cpufreq_driver);
	if (ret) {
		dev_err(&pdev->dev, "failed to register mtk cpufreq driver\n");
		goto release_dvfs_info_list;
	}

	return 0;

release_dvfs_info_list:
	list_for_each_safe(list, tmp, &dvfs_info_list) {
		info = list_entry(list, struct mtk_cpu_dvfs_info, list_head);

		mtk_cpu_dvfs_info_release(info);
		list_del(list);
	}

	return ret;
}

static struct platform_driver mt8173_cpufreq_platdrv = {
	.driver = {
		.name	= "mt8173-cpufreq",
	},
	.probe		= mt8173_cpufreq_probe,
};

static int mt8173_cpufreq_driver_init(void)
{
	struct platform_device *pdev;
	int err;

	if (!of_machine_is_compatible("mediatek,mt8173"))
		return -ENODEV;

	err = platform_driver_register(&mt8173_cpufreq_platdrv);
	if (err)
		return err;

	/*
	 * Since there is no place to hold device registration code and no
	 * device-tree-based way to match the cpufreq driver yet, both the
	 * driver and the device registration are done here so that deferred
	 * probing can be handled.
	 */
	pdev = platform_device_register_simple("mt8173-cpufreq", -1, NULL, 0);
	if (IS_ERR(pdev)) {
		pr_err("failed to register mtk-cpufreq platform device\n");
		platform_driver_unregister(&mt8173_cpufreq_platdrv);
		return PTR_ERR(pdev);
	}

	return 0;
}
device_initcall(mt8173_cpufreq_driver_init);