1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/clk.h>
15#include <linux/cpu.h>
16#include <linux/cpufreq.h>
17#include <linux/err.h>
18#include <linux/interrupt.h>
19#include <linux/io.h>
20#include <linux/module.h>
21#include <linux/of_address.h>
22#include <linux/of_irq.h>
23#include <linux/pm_opp.h>
24#include <linux/platform_device.h>
25#include <linux/slab.h>
26
27
/* Exynos5440 XMU (DVFS controller) register offsets */
#define XMU_DVFS_CTRL 0x0060
#define XMU_PMU_P0_7 0x0064
#define XMU_C0_3_PSTATE 0x0090
#define XMU_P_LIMIT 0x00a0
#define XMU_P_STATUS 0x00a4
#define XMU_PMUEVTEN 0x00d0
#define XMU_PMUIRQEN 0x00d4
#define XMU_PMUIRQ 0x00d8

/* A P-state index occupies 3 bits in the PSTATE/STATUS registers */
#define P_VALUE_MASK 0x7

#define XMU_DVFS_CTRL_EN_SHIFT 0

/* Bit-field layout of one per-P-state word in XMU_PMU_P0_7 */
#define P0_7_CPUCLKDEV_SHIFT 21
#define P0_7_CPUCLKDEV_MASK 0x7
#define P0_7_ATBCLKDEV_SHIFT 18
#define P0_7_ATBCLKDEV_MASK 0x7
#define P0_7_CSCLKDEV_SHIFT 15
#define P0_7_CSCLKDEV_MASK 0x7
#define P0_7_CPUEMA_SHIFT 28
#define P0_7_CPUEMA_MASK 0xf
#define P0_7_L2EMA_SHIFT 24
#define P0_7_L2EMA_MASK 0xf
#define P0_7_VDD_SHIFT 8
#define P0_7_VDD_MASK 0x7f
#define P0_7_FREQ_SHIFT 0
#define P0_7_FREQ_MASK 0xff

/* Per-core P-state request/status fields in XMU_C0_3_PSTATE */
#define C0_3_PSTATE_VALID_SHIFT 8
#define C0_3_PSTATE_CURR_SHIFT 4
#define C0_3_PSTATE_NEW_SHIFT 0

#define PSTATE_CHANGED_EVTEN_SHIFT 0

#define PSTATE_CHANGED_IRQEN_SHIFT 0

#define PSTATE_CHANGED_SHIFT 0

/* Divider reference frequencies, in MHz (see init_div_table()) */
#define CPU_DIV_FREQ_MAX 500
#define CPU_DBG_FREQ_MAX 375
#define CPU_ATB_FREQ_MAX 500

/* PMIC step codes: larger code means lower voltage */
#define PMIC_LOW_VOLT 0x30
#define PMIC_HIGH_VOLT 0x28

/* EMA (margin-adjustment) codes selected by operating voltage */
#define CPUEMA_HIGH 0x2
#define CPUEMA_MID 0x4
#define CPUEMA_LOW 0x7

#define L2EMA_HIGH 0x1
#define L2EMA_MID 0x3
#define L2EMA_LOW 0x4

#define DIV_TAB_MAX 2

/* Frequency field granularity (MHz) and PMIC voltage scale (uV) */
#define FREQ_UNIT 20
#define MAX_VOLTAGE 1550000
#define VOLTAGE_STEP 12500

#define CPUFREQ_NAME "exynos5440_dvfs"
#define DEF_TRANS_LATENCY 100000

enum cpufreq_level_index {
	L0, L1, L2, L3, L4,
	L5, L6, L7, L8, L9,
};
/*
 * NOTE(review): only L0..L7 are usable (8 P-states), although the enum
 * declares up to L9 — confirm against the XMU hardware documentation.
 */
#define CPUFREQ_LEVEL_END (L7 + 1)
97
/*
 * Driver-wide state, allocated once in probe and reached from all
 * callbacks through the dvfs_info singleton below.
 */
struct exynos_dvfs_data {
	void __iomem *base;		/* mapped XMU register block */
	struct resource *mem;
	int irq;			/* P-state-changed interrupt */
	struct clk *cpu_clk;		/* "armclk" */
	unsigned int latency;		/* transition latency from DT, or DEF_TRANS_LATENCY */
	struct cpufreq_frequency_table *freq_table;	/* sorted descending */
	unsigned int freq_count;	/* number of OPP entries */
	struct device *dev;
	bool dvfs_enabled;		/* set once probe fully succeeds */
	struct work_struct irq_work;	/* bottom half of the DVFS IRQ */
};

static struct exynos_dvfs_data *dvfs_info;
/* Serializes P-state requests against IRQ-driven transition completion */
static DEFINE_MUTEX(cpufreq_lock);
/* Shared between exynos_target() (begin) and exynos_cpufreq_work() (end) */
static struct cpufreq_freqs freqs;
114
/*
 * init_div_table() - program the per-P-state divider/EMA/voltage words.
 *
 * For every entry in the cpufreq table, look up its OPP, derive the
 * CPU/ATB/CS clock dividers and EMA codes from frequency and voltage,
 * and write the packed 32-bit word into the matching XMU_PMU_P0_7 slot
 * (4 bytes per P-state, indexed by table position).
 *
 * Return: 0 on success, or the dev_pm_opp_find_freq_exact() error if a
 * table frequency has no exact OPP.
 */
static int init_div_table(void)
{
	struct cpufreq_frequency_table *pos, *freq_tbl = dvfs_info->freq_table;
	unsigned int tmp, clk_div, ema_div, freq, volt_id;
	struct dev_pm_opp *opp;

	cpufreq_for_each_entry(pos, freq_tbl) {
		/* Table frequencies are in kHz; OPPs are keyed in Hz */
		opp = dev_pm_opp_find_freq_exact(dvfs_info->dev,
					pos->frequency * 1000, true);
		if (IS_ERR(opp)) {
			dev_err(dvfs_info->dev,
				"failed to find valid OPP for %u KHZ\n",
				pos->frequency);
			return PTR_ERR(opp);
		}

		freq = pos->frequency / 1000; /* kHz -> MHz */
		clk_div = ((freq / CPU_DIV_FREQ_MAX) & P0_7_CPUCLKDEV_MASK)
					<< P0_7_CPUCLKDEV_SHIFT;
		clk_div |= ((freq / CPU_ATB_FREQ_MAX) & P0_7_ATBCLKDEV_MASK)
					<< P0_7_ATBCLKDEV_SHIFT;
		clk_div |= ((freq / CPU_DBG_FREQ_MAX) & P0_7_CSCLKDEV_MASK)
					<< P0_7_CSCLKDEV_SHIFT;

		/* OPP voltage is in uV */
		volt_id = dev_pm_opp_get_voltage(opp);

		/* Convert uV into the PMIC step code (higher code = lower voltage) */
		volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP;
		if (volt_id < PMIC_HIGH_VOLT) {
			ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) |
				(L2EMA_HIGH << P0_7_L2EMA_SHIFT);
		} else if (volt_id > PMIC_LOW_VOLT) {
			ema_div = (CPUEMA_LOW << P0_7_CPUEMA_SHIFT) |
				(L2EMA_LOW << P0_7_L2EMA_SHIFT);
		} else {
			ema_div = (CPUEMA_MID << P0_7_CPUEMA_SHIFT) |
				(L2EMA_MID << P0_7_L2EMA_SHIFT);
		}

		tmp = (clk_div | ema_div | (volt_id << P0_7_VDD_SHIFT)
			| ((freq / FREQ_UNIT) << P0_7_FREQ_SHIFT));

		/* One 32-bit slot per table entry */
		__raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 *
						(pos - freq_tbl));
		dev_pm_opp_put(opp);
	}

	return 0;
}
164
/*
 * exynos_enable_dvfs() - switch the XMU into hardware DVFS mode.
 * @cur_frequency: boot frequency in kHz, used as the initial P-state.
 *
 * Enables the P-state-changed event and IRQ, seeds every core's
 * requested P-state with the table index of @cur_frequency (falling
 * back to the first, i.e. highest, entry when it is not in the table)
 * and finally sets the DVFS enable bit.
 */
static void exynos_enable_dvfs(unsigned int cur_frequency)
{
	unsigned int tmp, cpu;
	struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
	struct cpufreq_frequency_table *pos;

	/* Disable DVFS while reprogramming */
	__raw_writel(0, dvfs_info->base + XMU_DVFS_CTRL);

	/* Enable PSTATE Change Event */
	tmp = __raw_readl(dvfs_info->base + XMU_PMUEVTEN);
	tmp |= (1 << PSTATE_CHANGED_EVTEN_SHIFT);
	__raw_writel(tmp, dvfs_info->base + XMU_PMUEVTEN);

	/* Enable PSTATE Change IRQ */
	tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQEN);
	tmp |= (1 << PSTATE_CHANGED_IRQEN_SHIFT);
	__raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN);

	/* Find the table index of the boot frequency */
	cpufreq_for_each_entry(pos, freq_table)
		if (pos->frequency == cur_frequency)
			break;

	if (pos->frequency == CPUFREQ_TABLE_END) {
		dev_crit(dvfs_info->dev, "Boot up frequency not supported\n");
		/* Fall back to the first (highest) table frequency */
		pos = freq_table;
		cur_frequency = pos->frequency;
	}

	dev_info(dvfs_info->dev, "Setting dvfs initial frequency = %uKHZ",
						cur_frequency);

	/* Seed the requested P-state for every core */
	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) {
		tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
		tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
		tmp |= ((pos - freq_table) << C0_3_PSTATE_NEW_SHIFT);
		__raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
	}

	/* Enable DVFS */
	__raw_writel(1 << XMU_DVFS_CTRL_EN_SHIFT,
			dvfs_info->base + XMU_DVFS_CTRL);
}
209
210static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
211{
212 unsigned int tmp;
213 int i;
214 struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
215
216 mutex_lock(&cpufreq_lock);
217
218 freqs.old = policy->cur;
219 freqs.new = freq_table[index].frequency;
220
221 cpufreq_freq_transition_begin(policy, &freqs);
222
223
224 for_each_cpu(i, policy->cpus) {
225 tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
226 tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
227 tmp |= (index << C0_3_PSTATE_NEW_SHIFT);
228
229 __raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
230 }
231 mutex_unlock(&cpufreq_lock);
232 return 0;
233}
234
235static void exynos_cpufreq_work(struct work_struct *work)
236{
237 unsigned int cur_pstate, index;
238 struct cpufreq_policy *policy = cpufreq_cpu_get(0);
239 struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
240
241
242 if (unlikely(dvfs_info->dvfs_enabled == false))
243 goto skip_work;
244
245 mutex_lock(&cpufreq_lock);
246 freqs.old = policy->cur;
247
248 cur_pstate = __raw_readl(dvfs_info->base + XMU_P_STATUS);
249 if (cur_pstate >> C0_3_PSTATE_VALID_SHIFT & 0x1)
250 index = (cur_pstate >> C0_3_PSTATE_CURR_SHIFT) & P_VALUE_MASK;
251 else
252 index = (cur_pstate >> C0_3_PSTATE_NEW_SHIFT) & P_VALUE_MASK;
253
254 if (likely(index < dvfs_info->freq_count)) {
255 freqs.new = freq_table[index].frequency;
256 } else {
257 dev_crit(dvfs_info->dev, "New frequency out of range\n");
258 freqs.new = freqs.old;
259 }
260 cpufreq_freq_transition_end(policy, &freqs, 0);
261
262 cpufreq_cpu_put(policy);
263 mutex_unlock(&cpufreq_lock);
264skip_work:
265 enable_irq(dvfs_info->irq);
266}
267
268static irqreturn_t exynos_cpufreq_irq(int irq, void *id)
269{
270 unsigned int tmp;
271
272 tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQ);
273 if (tmp >> PSTATE_CHANGED_SHIFT & 0x1) {
274 __raw_writel(tmp, dvfs_info->base + XMU_PMUIRQ);
275 disable_irq_nosync(irq);
276 schedule_work(&dvfs_info->irq_work);
277 }
278 return IRQ_HANDLED;
279}
280
281static void exynos_sort_descend_freq_table(void)
282{
283 struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
284 int i = 0, index;
285 unsigned int tmp_freq;
286
287
288
289
290
291
292 for (i = 0; i < dvfs_info->freq_count / 2; i++) {
293 index = dvfs_info->freq_count - i - 1;
294 tmp_freq = freq_tbl[i].frequency;
295 freq_tbl[i].frequency = freq_tbl[index].frequency;
296 freq_tbl[index].frequency = tmp_freq;
297 }
298}
299
300static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
301{
302 policy->clk = dvfs_info->cpu_clk;
303 return cpufreq_generic_init(policy, dvfs_info->freq_table,
304 dvfs_info->latency);
305}
306
/*
 * ASYNC_NOTIFICATION: transitions finish from the IRQ work item, not
 * from exynos_target() itself.
 */
static struct cpufreq_driver exynos_driver = {
	.flags = CPUFREQ_STICKY | CPUFREQ_ASYNC_NOTIFICATION |
				CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify = cpufreq_generic_frequency_table_verify,
	.target_index = exynos_target,
	.get = cpufreq_generic_get,
	.init = exynos_cpufreq_cpu_init,
	.name = CPUFREQ_NAME,
	.attr = cpufreq_generic_attr,
};
317
/* Device-tree match table */
static const struct of_device_id exynos_cpufreq_match[] = {
	{
		.compatible = "samsung,exynos5440-cpufreq",
	},
	{},
};
MODULE_DEVICE_TABLE(of, exynos_cpufreq_match);
325
/*
 * exynos_cpufreq_probe() - map the XMU, build the OPP-derived frequency
 * table, program the divider table, enable hardware DVFS and register
 * the cpufreq driver. Resources are unwound in reverse order on error.
 */
static int exynos_cpufreq_probe(struct platform_device *pdev)
{
	int ret = -EINVAL;
	struct device_node *np;
	struct resource res;
	unsigned int cur_frequency;

	np = pdev->dev.of_node;
	if (!np)
		return -ENODEV;

	dvfs_info = devm_kzalloc(&pdev->dev, sizeof(*dvfs_info), GFP_KERNEL);
	if (!dvfs_info) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	dvfs_info->dev = &pdev->dev;

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		goto err_put_node;

	dvfs_info->base = devm_ioremap_resource(dvfs_info->dev, &res);
	if (IS_ERR(dvfs_info->base)) {
		ret = PTR_ERR(dvfs_info->base);
		goto err_put_node;
	}

	dvfs_info->irq = irq_of_parse_and_map(np, 0);
	if (!dvfs_info->irq) {
		dev_err(dvfs_info->dev, "No cpufreq irq found\n");
		ret = -ENODEV;
		goto err_put_node;
	}

	ret = dev_pm_opp_of_add_table(dvfs_info->dev);
	if (ret) {
		dev_err(dvfs_info->dev, "failed to init OPP table: %d\n", ret);
		goto err_put_node;
	}

	ret = dev_pm_opp_init_cpufreq_table(dvfs_info->dev,
					    &dvfs_info->freq_table);
	if (ret) {
		dev_err(dvfs_info->dev,
			"failed to init cpufreq table: %d\n", ret);
		goto err_free_opp;
	}
	dvfs_info->freq_count = dev_pm_opp_get_opp_count(dvfs_info->dev);
	/* The XMU wants the highest frequency at P-state index 0 */
	exynos_sort_descend_freq_table();

	if (of_property_read_u32(np, "clock-latency", &dvfs_info->latency))
		dvfs_info->latency = DEF_TRANS_LATENCY;

	dvfs_info->cpu_clk = devm_clk_get(dvfs_info->dev, "armclk");
	if (IS_ERR(dvfs_info->cpu_clk)) {
		dev_err(dvfs_info->dev, "Failed to get cpu clock\n");
		ret = PTR_ERR(dvfs_info->cpu_clk);
		goto err_free_table;
	}

	cur_frequency = clk_get_rate(dvfs_info->cpu_clk);
	if (!cur_frequency) {
		dev_err(dvfs_info->dev, "Failed to get clock rate\n");
		ret = -EINVAL;
		goto err_free_table;
	}
	cur_frequency /= 1000;	/* Hz -> kHz */

	INIT_WORK(&dvfs_info->irq_work, exynos_cpufreq_work);
	ret = devm_request_irq(dvfs_info->dev, dvfs_info->irq,
				exynos_cpufreq_irq, IRQF_TRIGGER_NONE,
				CPUFREQ_NAME, dvfs_info);
	if (ret) {
		dev_err(dvfs_info->dev, "Failed to register IRQ\n");
		goto err_free_table;
	}

	ret = init_div_table();
	if (ret) {
		dev_err(dvfs_info->dev, "Failed to initialise div table\n");
		goto err_free_table;
	}

	exynos_enable_dvfs(cur_frequency);
	ret = cpufreq_register_driver(&exynos_driver);
	if (ret) {
		dev_err(dvfs_info->dev,
			"%s: failed to register cpufreq driver\n", __func__);
		goto err_free_table;
	}

	/*
	 * NOTE(review): np comes from pdev->dev.of_node without a matching
	 * of_node_get(); the of_node_put() here and on the error path drops
	 * a reference this function never took — verify intent.
	 */
	of_node_put(np);
	/* Only now may the IRQ work path complete transitions */
	dvfs_info->dvfs_enabled = true;
	return 0;

err_free_table:
	dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
err_free_opp:
	dev_pm_opp_of_remove_table(dvfs_info->dev);
err_put_node:
	of_node_put(np);
	dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
	return ret;
}
432
/*
 * exynos_cpufreq_remove() - tear down in reverse order of probe:
 * unregister from the cpufreq core first, then release the frequency
 * table and the OPP table.
 */
static int exynos_cpufreq_remove(struct platform_device *pdev)
{
	cpufreq_unregister_driver(&exynos_driver);
	dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
	dev_pm_opp_of_remove_table(dvfs_info->dev);
	return 0;
}
440
/* Platform driver glue; bound via the OF match table above */
static struct platform_driver exynos_cpufreq_platdrv = {
	.driver = {
		.name	= "exynos5440-cpufreq",
		.of_match_table = exynos_cpufreq_match,
	},
	.probe	= exynos_cpufreq_probe,
	.remove	= exynos_cpufreq_remove,
};
module_platform_driver(exynos_cpufreq_platdrv);

MODULE_AUTHOR("Amit Daniel Kachhap <amit.daniel@samsung.com>");
MODULE_DESCRIPTION("Exynos5440 cpufreq driver");
MODULE_LICENSE("GPL");
454