/*
 * CPU clock support for Samsung Exynos platforms.
 *
 * A CPU clock is defined as a clock supplied to a CPU or a group of CPUs.
 * The CPU clock is typically derived from a hierarchy of clock blocks which
 * includes mux and divider blocks. There are a number of other auxiliary
 * clocks supplied to the CPU domain, such as the debug blocks and AT port
 * clocks, whose rates are related to the CPU clock rate. During CPU clock
 * rate changes, the CPU is temporarily re-parented to a safe alternate
 * clock and pre-computed divider values are programmed, so that neither
 * the CPU nor its auxiliary clocks ever run out of spec.
 */
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include "clk-cpu.h"

#define E4210_SRC_CPU		0x0
#define E4210_STAT_CPU		0x200
#define E4210_DIV_CPU0		0x300
#define E4210_DIV_CPU1		0x304
#define E4210_DIV_STAT_CPU0	0x400
#define E4210_DIV_STAT_CPU1	0x404

#define E5433_MUX_SEL2		0x008
#define E5433_MUX_STAT2		0x208
#define E5433_DIV_CPU0		0x400
#define E5433_DIV_CPU1		0x404
#define E5433_DIV_STAT_CPU0	0x500
#define E5433_DIV_STAT_CPU1	0x504

#define E4210_DIV0_RATIO0_MASK	0x7
#define E4210_DIV1_HPM_MASK	(0x7 << 4)
#define E4210_DIV1_COPY_MASK	(0x7 << 0)
#define E4210_MUX_HPM_MASK	(1 << 20)
#define E4210_DIV0_ATB_SHIFT	16
#define E4210_DIV0_ATB_MASK	(DIV_MASK << E4210_DIV0_ATB_SHIFT)

#define MAX_DIV			8
#define DIV_MASK		7
#define DIV_MASK_ALL		0xffffffff
#define MUX_MASK		7

/*
 * Helper function to wait until divider(s) have stabilized after the
 * divider value has changed.
 */
static void wait_until_divider_stable(void __iomem *div_reg, unsigned long mask)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(10);

	do {
		if (!(readl(div_reg) & mask))
			return;
	} while (time_before(jiffies, timeout));

	/* re-check one last time in case we slept past the timeout */
	if (!(readl(div_reg) & mask))
		return;

	pr_err("%s: timeout in divider stabilization\n", __func__);
}

/*
 * Helper function to wait until the mux has stabilized after the mux
 * selection value has changed.
 */
static void wait_until_mux_stable(void __iomem *mux_reg, u32 mux_pos,
				  unsigned long mux_value)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(10);

	do {
		if (((readl(mux_reg) >> mux_pos) & MUX_MASK) == mux_value)
			return;
	} while (time_before(jiffies, timeout));

	/* re-check one last time in case we slept past the timeout */
	if (((readl(mux_reg) >> mux_pos) & MUX_MASK) == mux_value)
		return;

	pr_err("%s: re-parenting mux timed out\n", __func__);
}

/* common round rate callback usable for all types of CPU clocks */
static long exynos_cpuclk_round_rate(struct clk_hw *hw,
				     unsigned long drate, unsigned long *prate)
{
	struct clk_hw *parent = clk_hw_get_parent(hw);

	*prate = clk_hw_round_rate(parent, drate);
	return *prate;
}

/* common recalc rate callback usable for all types of CPU clocks */
static unsigned long exynos_cpuclk_recalc_rate(struct clk_hw *hw,
					       unsigned long parent_rate)
{
	/*
	 * The CPU clock output (armclk) rate is the same as its parent
	 * rate. Although there exist certain dividers inside the CPU
	 * clock block that could be used to divide the parent clock,
	 * the driver does not make use of them currently, except during
	 * frequency transitions.
	 */
	return parent_rate;
}

static const struct clk_ops exynos_cpuclk_clk_ops = {
	.recalc_rate = exynos_cpuclk_recalc_rate,
	.round_rate = exynos_cpuclk_round_rate,
};

/*
 * Helper function to set the 'safe' dividers for the CPU clock. The
 * parameters div and mask contain the divider value and the register bit
 * mask of the dividers to be programmed.
 */
static void exynos_set_safe_div(void __iomem *base, unsigned long div,
				unsigned long mask)
{
	unsigned long div0;

	div0 = readl(base + E4210_DIV_CPU0);
	div0 = (div0 & ~mask) | (div & mask);
	writel(div0, base + E4210_DIV_CPU0);
	wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, mask);
}

/* handler for pre-rate change notification from parent clock */
static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
		struct exynos_cpuclk *cpuclk, void __iomem *base)
{
	const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
	unsigned long alt_prate = clk_hw_get_rate(cpuclk->alt_parent);
	unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
	unsigned long div0, div1 = 0, mux_reg;
	unsigned long flags;

	/* find out the divider values to use for clock data */
	while ((cfg_data->prate * 1000) != ndata->new_rate) {
		if (cfg_data->prate == 0)
			return -EINVAL;
		cfg_data++;
	}

	spin_lock_irqsave(cpuclk->lock, flags);

	/*
	 * For the selected PLL clock frequency, get the pre-defined divider
	 * values. If the clock for sclk_hpm is not sourced from apll, then
	 * the values for DIV_COPY and DIV_HPM dividers need not be set.
	 */
	div0 = cfg_data->div0;
	if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
		div1 = cfg_data->div1;
		if (readl(base + E4210_SRC_CPU) & E4210_MUX_HPM_MASK)
			div1 = readl(base + E4210_DIV_CPU1) &
				(E4210_DIV1_HPM_MASK | E4210_DIV1_COPY_MASK);
	}

	/*
	 * If the old parent clock speed is less than the clock speed of
	 * the alternate parent, then it should be ensured that at no point
	 * the armclk speed is more than the old_prate until the dividers are
	 * set.  Also workaround the issue of the dividers being set to lower
	 * values before the parent clock speed is set to new lower speed
	 * (this can result in too high speed of armclk output clocks).
	 */
	if (alt_prate > ndata->old_rate || ndata->old_rate > ndata->new_rate) {
		unsigned long tmp_rate = min(ndata->old_rate, ndata->new_rate);

		alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
		WARN_ON(alt_div >= MAX_DIV);

		if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
			/*
			 * In Exynos4210, the ATB clock parent is also
			 * mout_core, so the ATB clock also needs to be
			 * maintained at a safe speed.
			 */
			alt_div |= E4210_DIV0_ATB_MASK;
			alt_div_mask |= E4210_DIV0_ATB_MASK;
		}
		exynos_set_safe_div(base, alt_div, alt_div_mask);
		div0 |= alt_div;
	}

	/* select sclk_mpll as the alternate parent */
	mux_reg = readl(base + E4210_SRC_CPU);
	writel(mux_reg | (1 << 16), base + E4210_SRC_CPU);
	wait_until_mux_stable(base + E4210_STAT_CPU, 16, 2);

	/* alternate parent is active now. set the dividers */
	writel(div0, base + E4210_DIV_CPU0);
	wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, DIV_MASK_ALL);

	if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
		writel(div1, base + E4210_DIV_CPU1);
		wait_until_divider_stable(base + E4210_DIV_STAT_CPU1,
					  DIV_MASK_ALL);
	}

	spin_unlock_irqrestore(cpuclk->lock, flags);
	return 0;
}

/* handler for post-rate change notification from parent clock */
static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
		struct exynos_cpuclk *cpuclk, void __iomem *base)
{
	const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
	unsigned long div = 0, div_mask = DIV_MASK;
	unsigned long mux_reg;
	unsigned long flags;

	/* find out the divider values to use for clock data */
	if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
		while ((cfg_data->prate * 1000) != ndata->new_rate) {
			if (cfg_data->prate == 0)
				return -EINVAL;
			cfg_data++;
		}
	}

	spin_lock_irqsave(cpuclk->lock, flags);

	/* select mout_apll as the alternate parent */
	mux_reg = readl(base + E4210_SRC_CPU);
	writel(mux_reg & ~(1 << 16), base + E4210_SRC_CPU);
	wait_until_mux_stable(base + E4210_STAT_CPU, 16, 1);

	if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
		div |= (cfg_data->div0 & E4210_DIV0_ATB_MASK);
		div_mask |= E4210_DIV0_ATB_MASK;
	}

	exynos_set_safe_div(base, div, div_mask);
	spin_unlock_irqrestore(cpuclk->lock, flags);
	return 0;
}

/*
 * Helper function to set the 'safe' dividers for the CPU clock. The
 * parameters div and mask contain the divider value and the register bit
 * mask of the dividers to be programmed.
 */
static void exynos5433_set_safe_div(void __iomem *base, unsigned long div,
				    unsigned long mask)
{
	unsigned long div0;

	div0 = readl(base + E5433_DIV_CPU0);
	div0 = (div0 & ~mask) | (div & mask);
	writel(div0, base + E5433_DIV_CPU0);
	wait_until_divider_stable(base + E5433_DIV_STAT_CPU0, mask);
}

/* handler for pre-rate change notification from parent clock */
static int exynos5433_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
		struct exynos_cpuclk *cpuclk, void __iomem *base)
{
	const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
	unsigned long alt_prate = clk_hw_get_rate(cpuclk->alt_parent);
	unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
	unsigned long div0, div1 = 0, mux_reg;
	unsigned long flags;

	/* find out the divider values to use for clock data */
	while ((cfg_data->prate * 1000) != ndata->new_rate) {
		if (cfg_data->prate == 0)
			return -EINVAL;
		cfg_data++;
	}

	spin_lock_irqsave(cpuclk->lock, flags);

	/*
	 * For the selected PLL clock frequency, get the pre-defined divider
	 * values.
	 */
	div0 = cfg_data->div0;
	div1 = cfg_data->div1;

	/*
	 * If the old parent clock speed is less than the clock speed of
	 * the alternate parent, then it should be ensured that at no point
	 * the armclk speed is more than the old_prate until the dividers are
	 * set.  Also workaround the issue of the dividers being set to lower
	 * values before the parent clock speed is set to new lower speed
	 * (this can result in too high speed of armclk output clocks).
	 */
	if (alt_prate > ndata->old_rate || ndata->old_rate > ndata->new_rate) {
		unsigned long tmp_rate = min(ndata->old_rate, ndata->new_rate);

		alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
		WARN_ON(alt_div >= MAX_DIV);

		exynos5433_set_safe_div(base, alt_div, alt_div_mask);
		div0 |= alt_div;
	}

	/* select the alternate parent */
	mux_reg = readl(base + E5433_MUX_SEL2);
	writel(mux_reg | 1, base + E5433_MUX_SEL2);
	wait_until_mux_stable(base + E5433_MUX_STAT2, 0, 2);

	/* alternate parent is active now. set the dividers */
	writel(div0, base + E5433_DIV_CPU0);
	wait_until_divider_stable(base + E5433_DIV_STAT_CPU0, DIV_MASK_ALL);

	writel(div1, base + E5433_DIV_CPU1);
	wait_until_divider_stable(base + E5433_DIV_STAT_CPU1, DIV_MASK_ALL);

	spin_unlock_irqrestore(cpuclk->lock, flags);
	return 0;
}

/* handler for post-rate change notification from parent clock */
static int exynos5433_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
		struct exynos_cpuclk *cpuclk, void __iomem *base)
{
	unsigned long div = 0, div_mask = DIV_MASK;
	unsigned long mux_reg;
	unsigned long flags;

	spin_lock_irqsave(cpuclk->lock, flags);

	/* select apll as the alternate parent */
	mux_reg = readl(base + E5433_MUX_SEL2);
	writel(mux_reg & ~1, base + E5433_MUX_SEL2);
	wait_until_mux_stable(base + E5433_MUX_STAT2, 0, 1);

	exynos5433_set_safe_div(base, div, div_mask);
	spin_unlock_irqrestore(cpuclk->lock, flags);
	return 0;
}

/*
 * This notifier function is called for the pre-rate and post-rate change
 * notifications of the parent clock of cpuclk.
 */
static int exynos_cpuclk_notifier_cb(struct notifier_block *nb,
				     unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct exynos_cpuclk *cpuclk;
	void __iomem *base;
	int err = 0;

	cpuclk = container_of(nb, struct exynos_cpuclk, clk_nb);
	base = cpuclk->ctrl_base;

	if (event == PRE_RATE_CHANGE)
		err = exynos_cpuclk_pre_rate_change(ndata, cpuclk, base);
	else if (event == POST_RATE_CHANGE)
		err = exynos_cpuclk_post_rate_change(ndata, cpuclk, base);

	return notifier_from_errno(err);
}

/*
 * This notifier function is called for the pre-rate and post-rate change
 * notifications of the parent clock of cpuclk.
 */
static int exynos5433_cpuclk_notifier_cb(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct exynos_cpuclk *cpuclk;
	void __iomem *base;
	int err = 0;

	cpuclk = container_of(nb, struct exynos_cpuclk, clk_nb);
	base = cpuclk->ctrl_base;

	if (event == PRE_RATE_CHANGE)
		err = exynos5433_cpuclk_pre_rate_change(ndata, cpuclk, base);
	else if (event == POST_RATE_CHANGE)
		err = exynos5433_cpuclk_post_rate_change(ndata, cpuclk, base);

	return notifier_from_errno(err);
}

/* helper function to register a CPU clock */
int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
		unsigned int lookup_id, const char *name,
		const struct clk_hw *parent, const struct clk_hw *alt_parent,
		unsigned long offset, const struct exynos_cpuclk_cfg_data *cfg,
		unsigned long num_cfgs, unsigned long flags)
{
	struct exynos_cpuclk *cpuclk;
	struct clk_init_data init;
	const char *parent_name;
	int ret = 0;

	if (IS_ERR(parent) || IS_ERR(alt_parent)) {
		pr_err("%s: invalid parent clock(s)\n", __func__);
		return -EINVAL;
	}

	cpuclk = kzalloc(sizeof(*cpuclk), GFP_KERNEL);
	if (!cpuclk)
		return -ENOMEM;

	parent_name = clk_hw_get_name(parent);

	init.name = name;
	init.flags = CLK_SET_RATE_PARENT;
	init.parent_names = &parent_name;
	init.num_parents = 1;
	init.ops = &exynos_cpuclk_clk_ops;

	cpuclk->alt_parent = alt_parent;
	cpuclk->hw.init = &init;
	cpuclk->ctrl_base = ctx->reg_base + offset;
	cpuclk->lock = &ctx->lock;
	cpuclk->flags = flags;
	if (flags & CLK_CPU_HAS_E5433_REGS_LAYOUT)
		cpuclk->clk_nb.notifier_call = exynos5433_cpuclk_notifier_cb;
	else
		cpuclk->clk_nb.notifier_call = exynos_cpuclk_notifier_cb;

	/* register for rate change notifications from the parent clock */
	ret = clk_notifier_register(parent->clk, &cpuclk->clk_nb);
	if (ret) {
		pr_err("%s: failed to register clock notifier for %s\n",
		       __func__, name);
		goto free_cpuclk;
	}

	cpuclk->cfg = kmemdup(cfg, sizeof(*cfg) * num_cfgs, GFP_KERNEL);
	if (!cpuclk->cfg) {
		ret = -ENOMEM;
		goto unregister_clk_nb;
	}

	ret = clk_hw_register(NULL, &cpuclk->hw);
	if (ret) {
		pr_err("%s: could not register cpuclk %s\n", __func__, name);
		goto free_cpuclk_data;
	}

	samsung_clk_add_lookup(ctx, &cpuclk->hw, lookup_id);
	return 0;

free_cpuclk_data:
	kfree(cpuclk->cfg);
unregister_clk_nb:
	clk_notifier_unregister(parent->clk, &cpuclk->clk_nb);
free_cpuclk:
	kfree(cpuclk);
	return ret;
}
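
/*
 * Usage sketch (illustrative only, not part of this driver): a platform
 * clock driver would call exynos_register_cpu_clock() with a table of
 * pre-computed divider settings. Note that .prate is stored in kHz,
 * matching the "cfg_data->prate * 1000" comparison in the notifier
 * handlers above, and that the table must end with a zero-prate sentinel
 * entry. The identifiers EXAMPLE_CLK_ARM and example_armclk_d[], the
 * register offset, and the raw divider values below are hypothetical,
 * not taken from any real SoC driver.
 */
#if 0	/* example only, never compiled */
static const struct exynos_cpuclk_cfg_data example_armclk_d[] __initconst = {
	/* prate (kHz)  div0 (raw)  div1 (raw) */
	{ 1000000, 0x00100443, 0x00000003, },
	{  800000, 0x00100342, 0x00000003, },
	{ 0 },	/* sentinel: terminates the lookup in the notifier handlers */
};

static void __init example_clk_init(struct samsung_clk_provider *ctx,
				    struct clk_hw *mout_core_hw,
				    struct clk_hw *mout_mpll_hw)
{
	/*
	 * armclk follows mout_core; mout_mpll acts as the safe alternate
	 * parent while the PLL relocks during a rate change.
	 */
	exynos_register_cpu_clock(ctx, EXAMPLE_CLK_ARM, "armclk",
				  mout_core_hw, mout_mpll_hw, 0x14200,
				  example_armclk_d,
				  ARRAY_SIZE(example_armclk_d),
				  CLK_CPU_NEEDS_DEBUG_ALT_DIV);
}
#endif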