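/*
 * Clock driver for TI "ti,clkctrl" clock control module registers.
 * Registers a module-mode clock for each clkctrl register described by the
 * SoC clkctrl data, plus any gate/divider/mux subclocks, and exposes them
 * through a two-cell of_clk provider (<register offset, bit offset>).
 */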
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/ti.h>
#include <linux/delay.h>
#include "clock.h"

#define NO_IDLEST			0x1

#define OMAP4_MODULEMODE_MASK		0x3

#define MODULEMODE_HWCTRL		0x1
#define MODULEMODE_SWCTRL		0x2

#define OMAP4_IDLEST_MASK		(0x3 << 16)
#define OMAP4_IDLEST_SHIFT		16

#define CLKCTRL_IDLEST_FUNCTIONAL	0x0
#define CLKCTRL_IDLEST_INTERFACE_IDLE	0x2
#define CLKCTRL_IDLEST_DISABLED		0x3

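/* Module ready/disable poll timeouts, in microseconds */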
#define OMAP4_MAX_MODULE_READY_TIME	2000
#define OMAP4_MAX_MODULE_DISABLE_TIME	5000

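/*
 * Until timekeeping is available, timeouts are counted as udelay(1)
 * iterations; _omap4_disable_early_timeout() switches to ktime-based
 * polling at arch_initcall time.
 */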
static bool _early_timeout = true;

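/* One clkctrl provider instance: register base and its registered clocks */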
struct omap_clkctrl_provider {
	void __iomem *base;
	struct list_head clocks;
};

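/* A single registered clkctrl clock, looked up by <reg offset, bit offset> */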
struct omap_clkctrl_clk {
	struct clk_hw *clk;
	u16 reg_offset;
	int bit_offset;
	struct list_head node;
};

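/* Timeout state: iteration count before timekeeping is up, ktime afterwards */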
union omap4_timeout {
	u32 cycles;
	ktime_t start;
};

static const struct omap_clkctrl_data default_clkctrl_data[] __initconst = {
	{ 0 },
};

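/* Extract the IDLEST field from a clkctrl register value */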
static u32 _omap4_idlest(u32 val)
{
	val &= OMAP4_IDLEST_MASK;
	val >>= OMAP4_IDLEST_SHIFT;

	return val;
}

static bool _omap4_is_idle(u32 val)
{
	val = _omap4_idlest(val);

	return val == CLKCTRL_IDLEST_DISABLED;
}

static bool _omap4_is_ready(u32 val)
{
	val = _omap4_idlest(val);

	return val == CLKCTRL_IDLEST_FUNCTIONAL ||
	       val == CLKCTRL_IDLEST_INTERFACE_IDLE;
}

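/*
 * Returns true once the timeout (in microseconds) has expired; counts
 * udelay(1) iterations early in boot, uses ktime afterwards.
 */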
static bool _omap4_is_timeout(union omap4_timeout *time, u32 timeout)
{
	if (unlikely(_early_timeout)) {
		if (time->cycles++ < timeout) {
			udelay(1);
			return false;
		}
	} else {
		if (!ktime_to_ns(time->start)) {
			time->start = ktime_get();
			return false;
		}

		if (ktime_us_delta(ktime_get(), time->start) < timeout) {
			cpu_relax();
			return false;
		}
	}

	return true;
}

static int __init _omap4_disable_early_timeout(void)
{
	_early_timeout = false;

	return 0;
}
arch_initcall(_omap4_disable_early_timeout);

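/*
 * Enable a clkctrl module clock: enable its clockdomain (if any), program
 * MODULEMODE, then wait for the module to report ready unless NO_IDLEST.
 */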
static int _omap4_clkctrl_clk_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 val;
	int ret;
	union omap4_timeout timeout = { 0 };

	if (!clk->enable_bit)
		return 0;

	if (clk->clkdm) {
		ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
		if (ret) {
			WARN(1,
			     "%s: could not enable %s's clockdomain %s: %d\n",
			     __func__, clk_hw_get_name(hw),
			     clk->clkdm_name, ret);
			return ret;
		}
	}

	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

	val &= ~OMAP4_MODULEMODE_MASK;
	val |= clk->enable_bit;

	ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);

	if (clk->flags & NO_IDLEST)
		return 0;

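	/* Wait until the module reports functional (or interface-idle) */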
	while (!_omap4_is_ready(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
		if (_omap4_is_timeout(&timeout, OMAP4_MAX_MODULE_READY_TIME)) {
			pr_err("%s: failed to enable\n", clk_hw_get_name(hw));
			return -EBUSY;
		}
	}

	return 0;
}

static void _omap4_clkctrl_clk_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 val;
	union omap4_timeout timeout = { 0 };

	if (!clk->enable_bit)
		return;

	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

	val &= ~OMAP4_MODULEMODE_MASK;

	ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);

	if (clk->flags & NO_IDLEST)
		goto exit;

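	/* Wait until the module reports disabled */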
	while (!_omap4_is_idle(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
		if (_omap4_is_timeout(&timeout,
				      OMAP4_MAX_MODULE_DISABLE_TIME)) {
			pr_err("%s: failed to disable\n", clk_hw_get_name(hw));
			break;
		}
	}

exit:
	if (clk->clkdm)
		ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
}

static int _omap4_clkctrl_clk_is_enabled(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 val;

	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

	if (val & clk->enable_bit)
		return 1;

	return 0;
}

static const struct clk_ops omap4_clkctrl_clk_ops = {
	.enable		= _omap4_clkctrl_clk_enable,
	.disable	= _omap4_clkctrl_clk_disable,
	.is_enabled	= _omap4_clkctrl_clk_is_enabled,
};

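/*
 * of_clk xlate callback: map a two-cell clock specifier
 * <register offset, bit offset> to the matching registered clk_hw.
 */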
static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
					      void *data)
{
	struct omap_clkctrl_provider *provider = data;
	struct omap_clkctrl_clk *entry;
	bool found = false;

	if (clkspec->args_count != 2)
		return ERR_PTR(-EINVAL);

	pr_debug("%s: looking for %x:%x\n", __func__,
		 clkspec->args[0], clkspec->args[1]);

	list_for_each_entry(entry, &provider->clocks, node) {
		if (entry->reg_offset == clkspec->args[0] &&
		    entry->bit_offset == clkspec->args[1]) {
			found = true;
			break;
		}
	}

	/* The loop cursor is never NULL after list_for_each_entry() */
	if (!found)
		return ERR_PTR(-EINVAL);

	return entry->clk;
}

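/*
 * Register a single clkctrl (sub)clock and add it to the provider's list
 * so it can be found by the xlate callback.
 */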
static int __init
_ti_clkctrl_clk_register(struct omap_clkctrl_provider *provider,
			 struct device_node *node, struct clk_hw *clk_hw,
			 u16 offset, u8 bit, const char * const *parents,
			 int num_parents, const struct clk_ops *ops)
{
	struct clk_init_data init = { NULL };
	struct clk *clk;
	struct omap_clkctrl_clk *clkctrl_clk;
	int ret = 0;

	init.name = kasprintf(GFP_KERNEL, "%s:%s:%04x:%d", node->parent->name,
			      node->name, offset, bit);
	clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
	if (!init.name || !clkctrl_clk) {
		ret = -ENOMEM;
		goto cleanup;
	}

	clk_hw->init = &init;
	init.parent_names = parents;
	init.num_parents = num_parents;
	init.ops = ops;
	init.flags = CLK_IS_BASIC;

	clk = ti_clk_register(NULL, clk_hw, init.name);
	if (IS_ERR_OR_NULL(clk)) {
		ret = -EINVAL;
		goto cleanup;
	}

	clkctrl_clk->reg_offset = offset;
	clkctrl_clk->bit_offset = bit;
	clkctrl_clk->clk = clk_hw;

	list_add(&clkctrl_clk->node, &provider->clocks);

	return 0;

cleanup:
	kfree(init.name);
	kfree(clkctrl_clk);
	return ret;
}

static void __init
_ti_clkctrl_setup_gate(struct omap_clkctrl_provider *provider,
		       struct device_node *node, u16 offset,
		       const struct omap_clkctrl_bit_data *data,
		       void __iomem *reg)
{
	struct clk_hw_omap *clk_hw;

	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	if (!clk_hw)
		return;

	clk_hw->enable_bit = data->bit;
	clk_hw->enable_reg.ptr = reg;

	if (_ti_clkctrl_clk_register(provider, node, &clk_hw->hw, offset,
				     data->bit, data->parents, 1,
				     &omap_gate_clk_ops))
		kfree(clk_hw);
}

static void __init
_ti_clkctrl_setup_mux(struct omap_clkctrl_provider *provider,
		      struct device_node *node, u16 offset,
		      const struct omap_clkctrl_bit_data *data,
		      void __iomem *reg)
{
	struct clk_omap_mux *mux;
	int num_parents = 0;
	const char * const *pname;

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return;

	pname = data->parents;
	while (*pname) {
		num_parents++;
		pname++;
	}

	mux->mask = num_parents;
	mux->mask = (1 << fls(mux->mask)) - 1;

	mux->shift = data->bit;
	mux->reg.ptr = reg;

	if (_ti_clkctrl_clk_register(provider, node, &mux->hw, offset,
				     data->bit, data->parents, num_parents,
				     &ti_clk_mux_ops))
		kfree(mux);
}

static void __init
_ti_clkctrl_setup_div(struct omap_clkctrl_provider *provider,
		      struct device_node *node, u16 offset,
		      const struct omap_clkctrl_bit_data *data,
		      void __iomem *reg)
{
	struct clk_omap_divider *div;
	const struct omap_clkctrl_div_data *div_data = data->data;

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		return;

	div->reg.ptr = reg;
	div->shift = data->bit;

	if (ti_clk_parse_divider_data((int *)div_data->dividers,
				      div_data->max_div, 0, 0,
				      &div->width, &div->table)) {
		pr_err("%s: Data parsing for %s:%04x:%d failed\n", __func__,
		       node->name, offset, data->bit);
		kfree(div);
		return;
	}

	if (_ti_clkctrl_clk_register(provider, node, &div->hw, offset,
				     data->bit, data->parents, 1,
				     &ti_clk_divider_ops))
		kfree(div);
}

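/*
 * Walk the optional bit_data for a clkctrl register and register any
 * gate, divider or mux subclocks it describes.
 */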
static void __init
_ti_clkctrl_setup_subclks(struct omap_clkctrl_provider *provider,
			  struct device_node *node,
			  const struct omap_clkctrl_reg_data *data,
			  void __iomem *reg)
{
	const struct omap_clkctrl_bit_data *bits = data->bit_data;

	if (!bits)
		return;

	while (bits->bit) {
		switch (bits->type) {
		case TI_CLK_GATE:
			_ti_clkctrl_setup_gate(provider, node, data->offset,
					       bits, reg);
			break;

		case TI_CLK_DIVIDER:
			_ti_clkctrl_setup_div(provider, node, data->offset,
					      bits, reg);
			break;

		case TI_CLK_MUX:
			_ti_clkctrl_setup_mux(provider, node, data->offset,
					      bits, reg);
			break;

		default:
			pr_err("%s: bad subclk type: %d\n", __func__,
			       bits->type);
			return;
		}
		bits++;
	}
}

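/*
 * Main setup for a "ti,clkctrl" node: match the node address against the
 * SoC clkctrl data, register the module-mode clock and subclocks for each
 * described register, and register the clock provider.
 */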
static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
{
	struct omap_clkctrl_provider *provider;
	const struct omap_clkctrl_data *data = default_clkctrl_data;
	const struct omap_clkctrl_reg_data *reg_data;
	struct clk_init_data init = { NULL };
	struct clk_hw_omap *hw;
	struct clk *clk;
	struct omap_clkctrl_clk *clkctrl_clk;
	const __be32 *addrp;
	u32 addr;

	addrp = of_get_address(node, 0, NULL, NULL);
	addr = (u32)of_translate_address(node, addrp);

#ifdef CONFIG_ARCH_OMAP4
	if (of_machine_is_compatible("ti,omap4"))
		data = omap4_clkctrl_data;
#endif

	while (data->addr) {
		if (addr == data->addr)
			break;

		data++;
	}

	if (!data->addr) {
		pr_err("%s not found from clkctrl data.\n", node->name);
		return;
	}

	provider = kzalloc(sizeof(*provider), GFP_KERNEL);
	if (!provider)
		return;

	provider->base = of_iomap(node, 0);

	INIT_LIST_HEAD(&provider->clocks);

	reg_data = data->regs;

	while (reg_data->parent) {
		hw = kzalloc(sizeof(*hw), GFP_KERNEL);
		if (!hw)
			return;

		hw->enable_reg.ptr = provider->base + reg_data->offset;

		_ti_clkctrl_setup_subclks(provider, node, reg_data,
					  hw->enable_reg.ptr);

		if (reg_data->flags & CLKF_SW_SUP)
			hw->enable_bit = MODULEMODE_SWCTRL;
		if (reg_data->flags & CLKF_HW_SUP)
			hw->enable_bit = MODULEMODE_HWCTRL;
		if (reg_data->flags & CLKF_NO_IDLEST)
			hw->flags |= NO_IDLEST;

		init.parent_names = &reg_data->parent;
		init.num_parents = 1;
		init.flags = 0;
		init.name = kasprintf(GFP_KERNEL, "%s:%s:%04x:%d",
				      node->parent->name, node->name,
				      reg_data->offset, 0);
		clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
		if (!init.name || !clkctrl_clk)
			goto cleanup;

		init.ops = &omap4_clkctrl_clk_ops;
		hw->hw.init = &init;

		clk = ti_clk_register(NULL, &hw->hw, init.name);
		if (IS_ERR_OR_NULL(clk))
			goto cleanup;

		clkctrl_clk->reg_offset = reg_data->offset;
		clkctrl_clk->clk = &hw->hw;

		list_add(&clkctrl_clk->node, &provider->clocks);

		reg_data++;
	}

	of_clk_add_hw_provider(node, _ti_omap4_clkctrl_xlate, provider);
	return;

cleanup:
	kfree(hw);
	kfree(init.name);
	kfree(clkctrl_clk);
}
CLK_OF_DECLARE(ti_omap4_clkctrl_clock, "ti,clkctrl",
	       _ti_omap4_clkctrl_setup);