// SPDX-License-Identifier: GPL-2.0-only
/*
 * Utility functions to register CPU clocks for Samsung Exynos SoCs. A CPU
 * clock is a clock supplied to a CPU or a group of CPUs, derived from a
 * hierarchy of mux and divider blocks in the clock controller.
 */
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include "clk-cpu.h"

/* register offsets for the Exynos4210-style CPU clock controller */
#define E4210_SRC_CPU		0x0
#define E4210_STAT_CPU		0x200
#define E4210_DIV_CPU0		0x300
#define E4210_DIV_CPU1		0x304
#define E4210_DIV_STAT_CPU0	0x400
#define E4210_DIV_STAT_CPU1	0x404

/* register offsets for the Exynos5433 CPU clock controller */
#define E5433_MUX_SEL2		0x008
#define E5433_MUX_STAT2		0x208
#define E5433_DIV_CPU0		0x400
#define E5433_DIV_CPU1		0x404
#define E5433_DIV_STAT_CPU0	0x500
#define E5433_DIV_STAT_CPU1	0x504

#define E4210_DIV0_RATIO0_MASK	0x7
#define E4210_DIV1_HPM_MASK	(0x7 << 4)
#define E4210_DIV1_COPY_MASK	(0x7 << 0)
#define E4210_MUX_HPM_MASK	(1 << 20)
#define E4210_DIV0_ATB_SHIFT	16
#define E4210_DIV0_ATB_MASK	(DIV_MASK << E4210_DIV0_ATB_SHIFT)

#define MAX_DIV			8
#define DIV_MASK		7
#define DIV_MASK_ALL		0xffffffff
#define MUX_MASK		7
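/*
 * Helper function to wait until the divider(s) have stabilized after the
 * divider value has been changed.
 */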
static void wait_until_divider_stable(void __iomem *div_reg, unsigned long mask)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(10);

	do {
		if (!(readl(div_reg) & mask))
			return;
	} while (time_before(jiffies, timeout));

	/* re-check in case the polling loop was preempted past the timeout */
	if (!(readl(div_reg) & mask))
		return;

	pr_err("%s: timeout in divider stabilization\n", __func__);
}

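/*
 * Helper function to wait until the mux has stabilized after the mux
 * selection value has been changed.
 */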
static void wait_until_mux_stable(void __iomem *mux_reg, u32 mux_pos,
					unsigned long mux_value)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(10);

	do {
		if (((readl(mux_reg) >> mux_pos) & MUX_MASK) == mux_value)
			return;
	} while (time_before(jiffies, timeout));

	if (((readl(mux_reg) >> mux_pos) & MUX_MASK) == mux_value)
		return;

	pr_err("%s: re-parenting mux timed-out\n", __func__);
}

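/* common round_rate callback usable for all types of CPU clocks */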
static long exynos_cpuclk_round_rate(struct clk_hw *hw,
			unsigned long drate, unsigned long *prate)
{
	struct clk_hw *parent = clk_hw_get_parent(hw);

	*prate = clk_hw_round_rate(parent, drate);
	return *prate;
}

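/* common recalc_rate callback usable for all types of CPU clocks */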
static unsigned long exynos_cpuclk_recalc_rate(struct clk_hw *hw,
			unsigned long parent_rate)
{
	/*
	 * The CPU clock output (armclk) rate is the same as its parent
	 * rate. Although there exist certain dividers inside the CPU
	 * clock block that could be used to divide the parent clock,
	 * the driver does not make use of them currently, except during
	 * frequency transitions.
	 */
	return parent_rate;
}

static const struct clk_ops exynos_cpuclk_clk_ops = {
	.recalc_rate = exynos_cpuclk_recalc_rate,
	.round_rate = exynos_cpuclk_round_rate,
};

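/*
 * Helper function to set the 'safe' dividers for the CPU clock. The
 * parameters div and mask contain the divider value and the register bit
 * mask of the dividers to be programmed.
 */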
static void exynos_set_safe_div(void __iomem *base, unsigned long div,
				unsigned long mask)
{
	unsigned long div0;

	div0 = readl(base + E4210_DIV_CPU0);
	div0 = (div0 & ~mask) | (div & mask);
	writel(div0, base + E4210_DIV_CPU0);
	wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, mask);
}

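/* handler for pre-rate change notification from parent clock */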
static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
			struct exynos_cpuclk *cpuclk, void __iomem *base)
{
	const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
	unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent);
	unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
	unsigned long div0, div1 = 0, mux_reg;
	unsigned long flags;

	/* find the divider values for the new rate; cfg_data->prate is in kHz */
	while ((cfg_data->prate * 1000) != ndata->new_rate) {
		if (cfg_data->prate == 0)
			return -EINVAL;
		cfg_data++;
	}

	spin_lock_irqsave(cpuclk->lock, flags);

	/*
	 * For the selected PLL clock frequency, get the pre-defined divider
	 * values. If the clock for sclk_hpm is not sourced from apll, the
	 * DIV_COPY and DIV_HPM divider fields must be left at their current
	 * hardware values.
	 */
	div0 = cfg_data->div0;
	if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
		div1 = cfg_data->div1;
		if (readl(base + E4210_SRC_CPU) & E4210_MUX_HPM_MASK)
			div1 = readl(base + E4210_DIV_CPU1) &
				(E4210_DIV1_HPM_MASK | E4210_DIV1_COPY_MASK);
	}

	/*
	 * If the old parent clock speed is less than the clock speed of
	 * the alternate parent, then it should be ensured that at no point
	 * the armclk speed is more than the old_prate until the dividers are
	 * set. Also work around the issue of the dividers being set to lower
	 * values before the parent clock speed is set to the new lower speed
	 * (this can result in too high a speed of armclk output clocks).
	 */
	if (alt_prate > ndata->old_rate || ndata->old_rate > ndata->new_rate) {
		unsigned long tmp_rate = min(ndata->old_rate, ndata->new_rate);

		alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
		WARN_ON(alt_div >= MAX_DIV);

		if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
			/*
			 * The ATB clock is derived from the same parent, so
			 * it must also be kept at a safe speed while the CPU
			 * clock is re-parented.
			 */
			alt_div |= E4210_DIV0_ATB_MASK;
			alt_div_mask |= E4210_DIV0_ATB_MASK;
		}
		exynos_set_safe_div(base, alt_div, alt_div_mask);
		div0 |= alt_div;
	}

	/* select the alternate parent */
	mux_reg = readl(base + E4210_SRC_CPU);
	writel(mux_reg | (1 << 16), base + E4210_SRC_CPU);
	wait_until_mux_stable(base + E4210_STAT_CPU, 16, 2);

	/* alternate parent is active now; set the dividers */
	writel(div0, base + E4210_DIV_CPU0);
	wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, DIV_MASK_ALL);

	if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
		writel(div1, base + E4210_DIV_CPU1);
		wait_until_divider_stable(base + E4210_DIV_STAT_CPU1,
				DIV_MASK_ALL);
	}

	spin_unlock_irqrestore(cpuclk->lock, flags);
	return 0;
}

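/* handler for post-rate change notification from parent clock */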
static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
			struct exynos_cpuclk *cpuclk, void __iomem *base)
{
	const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
	unsigned long div = 0, div_mask = DIV_MASK;
	unsigned long mux_reg;
	unsigned long flags;

	/* find the divider values for the new rate; cfg_data->prate is in kHz */
	if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
		while ((cfg_data->prate * 1000) != ndata->new_rate) {
			if (cfg_data->prate == 0)
				return -EINVAL;
			cfg_data++;
		}
	}

	spin_lock_irqsave(cpuclk->lock, flags);

	/* switch the CPU clock mux back to the original parent */
	mux_reg = readl(base + E4210_SRC_CPU);
	writel(mux_reg & ~(1 << 16), base + E4210_SRC_CPU);
	wait_until_mux_stable(base + E4210_STAT_CPU, 16, 1);

	if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
		div |= (cfg_data->div0 & E4210_DIV0_ATB_MASK);
		div_mask |= E4210_DIV0_ATB_MASK;
	}

	exynos_set_safe_div(base, div, div_mask);
	spin_unlock_irqrestore(cpuclk->lock, flags);
	return 0;
}

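/*
 * Helper function to set the 'safe' dividers for the Exynos5433 CPU clock.
 * The parameters div and mask contain the divider value and the register
 * bit mask of the dividers to be programmed.
 */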
static void exynos5433_set_safe_div(void __iomem *base, unsigned long div,
				    unsigned long mask)
{
	unsigned long div0;

	div0 = readl(base + E5433_DIV_CPU0);
	div0 = (div0 & ~mask) | (div & mask);
	writel(div0, base + E5433_DIV_CPU0);
	wait_until_divider_stable(base + E5433_DIV_STAT_CPU0, mask);
}

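/* handler for pre-rate change notification from parent clock */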
static int exynos5433_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
			struct exynos_cpuclk *cpuclk, void __iomem *base)
{
	const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
	unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent);
	unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
	unsigned long div0, div1 = 0, mux_reg;
	unsigned long flags;

	/* find the divider values for the new rate; cfg_data->prate is in kHz */
	while ((cfg_data->prate * 1000) != ndata->new_rate) {
		if (cfg_data->prate == 0)
			return -EINVAL;
		cfg_data++;
	}

	spin_lock_irqsave(cpuclk->lock, flags);

	/*
	 * For the selected PLL clock frequency, get the pre-defined divider
	 * values.
	 */
	div0 = cfg_data->div0;
	div1 = cfg_data->div1;

	/*
	 * If the old parent clock speed is less than the clock speed of
	 * the alternate parent, then it should be ensured that at no point
	 * the armclk speed is more than the old_prate until the dividers are
	 * set. Also work around the issue of the dividers being set to lower
	 * values before the parent clock speed is set to the new lower speed
	 * (this can result in too high a speed of armclk output clocks).
	 */
	if (alt_prate > ndata->old_rate || ndata->old_rate > ndata->new_rate) {
		unsigned long tmp_rate = min(ndata->old_rate, ndata->new_rate);

		alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
		WARN_ON(alt_div >= MAX_DIV);

		exynos5433_set_safe_div(base, alt_div, alt_div_mask);
		div0 |= alt_div;
	}

	/* select the alternate parent */
	mux_reg = readl(base + E5433_MUX_SEL2);
	writel(mux_reg | 1, base + E5433_MUX_SEL2);
	wait_until_mux_stable(base + E5433_MUX_STAT2, 0, 2);

	/* alternate parent is active now; set the dividers */
	writel(div0, base + E5433_DIV_CPU0);
	wait_until_divider_stable(base + E5433_DIV_STAT_CPU0, DIV_MASK_ALL);

	writel(div1, base + E5433_DIV_CPU1);
	wait_until_divider_stable(base + E5433_DIV_STAT_CPU1, DIV_MASK_ALL);

	spin_unlock_irqrestore(cpuclk->lock, flags);
	return 0;
}

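/* handler for post-rate change notification from parent clock */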
static int exynos5433_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
			struct exynos_cpuclk *cpuclk, void __iomem *base)
{
	unsigned long div = 0, div_mask = DIV_MASK;
	unsigned long mux_reg;
	unsigned long flags;

	spin_lock_irqsave(cpuclk->lock, flags);

	/* switch the CPU clock mux back to the original parent */
	mux_reg = readl(base + E5433_MUX_SEL2);
	writel(mux_reg & ~1, base + E5433_MUX_SEL2);
	wait_until_mux_stable(base + E5433_MUX_STAT2, 0, 1);

	exynos5433_set_safe_div(base, div, div_mask);
	spin_unlock_irqrestore(cpuclk->lock, flags);
	return 0;
}

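/*
 * This notifier function is called for the pre-rate and post-rate change
 * notifications of the parent clock of cpuclk.
 */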
static int exynos_cpuclk_notifier_cb(struct notifier_block *nb,
				unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct exynos_cpuclk *cpuclk;
	void __iomem *base;
	int err = 0;

	cpuclk = container_of(nb, struct exynos_cpuclk, clk_nb);
	base = cpuclk->ctrl_base;

	if (event == PRE_RATE_CHANGE)
		err = exynos_cpuclk_pre_rate_change(ndata, cpuclk, base);
	else if (event == POST_RATE_CHANGE)
		err = exynos_cpuclk_post_rate_change(ndata, cpuclk, base);

	return notifier_from_errno(err);
}

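/*
 * This notifier function is called for the pre-rate and post-rate change
 * notifications of the parent clock of the Exynos5433 cpuclk.
 */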
static int exynos5433_cpuclk_notifier_cb(struct notifier_block *nb,
				unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct exynos_cpuclk *cpuclk;
	void __iomem *base;
	int err = 0;

	cpuclk = container_of(nb, struct exynos_cpuclk, clk_nb);
	base = cpuclk->ctrl_base;

	if (event == PRE_RATE_CHANGE)
		err = exynos5433_cpuclk_pre_rate_change(ndata, cpuclk, base);
	else if (event == POST_RATE_CHANGE)
		err = exynos5433_cpuclk_post_rate_change(ndata, cpuclk, base);

	return notifier_from_errno(err);
}

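/* helper function to register a CPU clock */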
int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
		unsigned int lookup_id, const char *name, const char *parent,
		const char *alt_parent, unsigned long offset,
		const struct exynos_cpuclk_cfg_data *cfg,
		unsigned long num_cfgs, unsigned long flags)
{
	struct exynos_cpuclk *cpuclk;
	struct clk_init_data init;
	struct clk *parent_clk;
	int ret = 0;

	cpuclk = kzalloc(sizeof(*cpuclk), GFP_KERNEL);
	if (!cpuclk)
		return -ENOMEM;

	init.name = name;
	init.flags = CLK_SET_RATE_PARENT;
	init.parent_names = &parent;
	init.num_parents = 1;
	init.ops = &exynos_cpuclk_clk_ops;

	cpuclk->hw.init = &init;
	cpuclk->ctrl_base = ctx->reg_base + offset;
	cpuclk->lock = &ctx->lock;
	cpuclk->flags = flags;
	if (flags & CLK_CPU_HAS_E5433_REGS_LAYOUT)
		cpuclk->clk_nb.notifier_call = exynos5433_cpuclk_notifier_cb;
	else
		cpuclk->clk_nb.notifier_call = exynos_cpuclk_notifier_cb;

	cpuclk->alt_parent = __clk_lookup(alt_parent);
	if (!cpuclk->alt_parent) {
		pr_err("%s: could not lookup alternate parent %s\n",
				__func__, alt_parent);
		ret = -EINVAL;
		goto free_cpuclk;
	}

	parent_clk = __clk_lookup(parent);
	if (!parent_clk) {
		pr_err("%s: could not lookup parent clock %s\n",
				__func__, parent);
		ret = -EINVAL;
		goto free_cpuclk;
	}

	ret = clk_notifier_register(parent_clk, &cpuclk->clk_nb);
	if (ret) {
		pr_err("%s: failed to register clock notifier for %s\n",
				__func__, name);
		goto free_cpuclk;
	}

	cpuclk->cfg = kmemdup(cfg, sizeof(*cfg) * num_cfgs, GFP_KERNEL);
	if (!cpuclk->cfg) {
		ret = -ENOMEM;
		goto unregister_clk_nb;
	}

	ret = clk_hw_register(NULL, &cpuclk->hw);
	if (ret) {
		pr_err("%s: could not register cpuclk %s\n", __func__, name);
		goto free_cpuclk_data;
	}

	samsung_clk_add_lookup(ctx, &cpuclk->hw, lookup_id);
	return 0;

free_cpuclk_data:
	kfree(cpuclk->cfg);
unregister_clk_nb:
	clk_notifier_unregister(parent_clk, &cpuclk->clk_nb);
free_cpuclk:
	kfree(cpuclk);
	return ret;
}

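/*
 * Usage sketch (illustrative only; the lookup ID, clock names, register
 * offset and cfg table below are hypothetical): a SoC clock driver would
 * typically register its CPU clock from the CMU init code, e.g.:
 *
 *	exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
 *			"mout_apll", "mout_mpll", 0x14200, e4210_armclk_d,
 *			ARRAY_SIZE(e4210_armclk_d), CLK_CPU_HAS_DIV1);
 */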