/*
 * Copyright (C) 2015 Atmel Corporation,
 *                    Nicolas Ferre <nicolas.ferre@atmel.com>
 *
 * Based on clk-programmable & clk-peripheral drivers by Boris BREZILLON.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

14#include <linux/clk-provider.h>
15#include <linux/clkdev.h>
16#include <linux/clk/at91_pmc.h>
17#include <linux/of.h>
18#include <linux/mfd/syscon.h>
19#include <linux/regmap.h>
20
21#include "pmc.h"
22
/* Peripheral ID space addressed through the PMC PCR register. */
#define PERIPHERAL_MAX 64	/* valid peripheral IDs are < 64 (see the "reg" range check below) */
#define PERIPHERAL_ID_MIN 2	/* IDs below 2 are rejected — presumably reserved; TODO confirm against the SAMA5D2 datasheet */

#define GENERATED_SOURCE_MAX 6	/* at most 6 selectable GCK parent clocks */
#define GENERATED_MAX_DIV 255	/* GCKDIV is 8 bits wide: divisor field 0..255, i.e. divide-by 1..256 */
28
/*
 * Driver-private state for one SAMA5D2 generated clock (GCK).
 *
 * All PCR accesses are two-step (write the PID to select the peripheral,
 * then read/modify the same register), so they must hold @lock to keep the
 * select+access pair atomic against other PCR users.
 */
struct clk_generated {
	struct clk_hw hw;	/* clk framework handle; container_of() anchor */
	struct regmap *regmap;	/* PMC register map used for AT91_PMC_PCR */
	struct clk_range range;	/* allowed output range; max == 0 means unconstrained */
	spinlock_t *lock;	/* shared PMC PCR lock (pmc_pcr_lock) */
	u32 id;			/* peripheral ID written to PCR to select this clock */
	u32 gckdiv;		/* cached divisor field; output = parent / (gckdiv + 1) */
	u8 parent_id;		/* cached GCKCSS parent selection index */
};

#define to_clk_generated(hw) \
	container_of(hw, struct clk_generated, hw)
41
42static int clk_generated_enable(struct clk_hw *hw)
43{
44 struct clk_generated *gck = to_clk_generated(hw);
45 unsigned long flags;
46
47 pr_debug("GCLK: %s, gckdiv = %d, parent id = %d\n",
48 __func__, gck->gckdiv, gck->parent_id);
49
50 spin_lock_irqsave(gck->lock, flags);
51 regmap_write(gck->regmap, AT91_PMC_PCR,
52 (gck->id & AT91_PMC_PCR_PID_MASK));
53 regmap_update_bits(gck->regmap, AT91_PMC_PCR,
54 AT91_PMC_PCR_GCKDIV_MASK | AT91_PMC_PCR_GCKCSS_MASK |
55 AT91_PMC_PCR_CMD | AT91_PMC_PCR_GCKEN,
56 AT91_PMC_PCR_GCKCSS(gck->parent_id) |
57 AT91_PMC_PCR_CMD |
58 AT91_PMC_PCR_GCKDIV(gck->gckdiv) |
59 AT91_PMC_PCR_GCKEN);
60 spin_unlock_irqrestore(gck->lock, flags);
61 return 0;
62}
63
64static void clk_generated_disable(struct clk_hw *hw)
65{
66 struct clk_generated *gck = to_clk_generated(hw);
67 unsigned long flags;
68
69 spin_lock_irqsave(gck->lock, flags);
70 regmap_write(gck->regmap, AT91_PMC_PCR,
71 (gck->id & AT91_PMC_PCR_PID_MASK));
72 regmap_update_bits(gck->regmap, AT91_PMC_PCR,
73 AT91_PMC_PCR_CMD | AT91_PMC_PCR_GCKEN,
74 AT91_PMC_PCR_CMD);
75 spin_unlock_irqrestore(gck->lock, flags);
76}
77
78static int clk_generated_is_enabled(struct clk_hw *hw)
79{
80 struct clk_generated *gck = to_clk_generated(hw);
81 unsigned long flags;
82 unsigned int status;
83
84 spin_lock_irqsave(gck->lock, flags);
85 regmap_write(gck->regmap, AT91_PMC_PCR,
86 (gck->id & AT91_PMC_PCR_PID_MASK));
87 regmap_read(gck->regmap, AT91_PMC_PCR, &status);
88 spin_unlock_irqrestore(gck->lock, flags);
89
90 return status & AT91_PMC_PCR_GCKEN ? 1 : 0;
91}
92
/*
 * Output rate = parent rate / (gckdiv + 1), computed from the cached
 * divisor field (GCKDIV encodes divide-by-(N+1)).
 */
static unsigned long
clk_generated_recalc_rate(struct clk_hw *hw,
			  unsigned long parent_rate)
{
	struct clk_generated *gck = to_clk_generated(hw);

	return DIV_ROUND_CLOSEST(parent_rate, gck->gckdiv + 1);
}
101
102static int clk_generated_determine_rate(struct clk_hw *hw,
103 struct clk_rate_request *req)
104{
105 struct clk_generated *gck = to_clk_generated(hw);
106 struct clk_hw *parent = NULL;
107 long best_rate = -EINVAL;
108 unsigned long tmp_rate, min_rate;
109 int best_diff = -1;
110 int tmp_diff;
111 int i;
112
113 for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
114 u32 div;
115 unsigned long parent_rate;
116
117 parent = clk_hw_get_parent_by_index(hw, i);
118 if (!parent)
119 continue;
120
121 parent_rate = clk_hw_get_rate(parent);
122 min_rate = DIV_ROUND_CLOSEST(parent_rate, GENERATED_MAX_DIV + 1);
123 if (!parent_rate ||
124 (gck->range.max && min_rate > gck->range.max))
125 continue;
126
127 for (div = 1; div < GENERATED_MAX_DIV + 2; div++) {
128 tmp_rate = DIV_ROUND_CLOSEST(parent_rate, div);
129 tmp_diff = abs(req->rate - tmp_rate);
130
131 if (best_diff < 0 || best_diff > tmp_diff) {
132 best_rate = tmp_rate;
133 best_diff = tmp_diff;
134 req->best_parent_rate = parent_rate;
135 req->best_parent_hw = parent;
136 }
137
138 if (!best_diff || tmp_rate < req->rate)
139 break;
140 }
141
142 if (!best_diff)
143 break;
144 }
145
146 pr_debug("GCLK: %s, best_rate = %ld, parent clk: %s @ %ld\n",
147 __func__, best_rate,
148 __clk_get_name((req->best_parent_hw)->clk),
149 req->best_parent_rate);
150
151 if (best_rate < 0)
152 return best_rate;
153
154 req->rate = best_rate;
155 return 0;
156}
157
158
159static int clk_generated_set_parent(struct clk_hw *hw, u8 index)
160{
161 struct clk_generated *gck = to_clk_generated(hw);
162
163 if (index >= clk_hw_get_num_parents(hw))
164 return -EINVAL;
165
166 gck->parent_id = index;
167 return 0;
168}
169
/* Return the cached parent selection (GCKCSS index read at startup). */
static u8 clk_generated_get_parent(struct clk_hw *hw)
{
	struct clk_generated *gck = to_clk_generated(hw);

	return gck->parent_id;
}
176
177
178static int clk_generated_set_rate(struct clk_hw *hw,
179 unsigned long rate,
180 unsigned long parent_rate)
181{
182 struct clk_generated *gck = to_clk_generated(hw);
183 u32 div;
184
185 if (!rate)
186 return -EINVAL;
187
188 if (gck->range.max && rate > gck->range.max)
189 return -EINVAL;
190
191 div = DIV_ROUND_CLOSEST(parent_rate, rate);
192 if (div > GENERATED_MAX_DIV + 1 || !div)
193 return -EINVAL;
194
195 gck->gckdiv = div - 1;
196 return 0;
197}
198
/* clk_ops for the generated clock; rate/parent changes are gated while
 * the clock is enabled (CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE). */
static const struct clk_ops generated_ops = {
	.enable = clk_generated_enable,
	.disable = clk_generated_disable,
	.is_enabled = clk_generated_is_enabled,
	.recalc_rate = clk_generated_recalc_rate,
	.determine_rate = clk_generated_determine_rate,
	.get_parent = clk_generated_get_parent,
	.set_parent = clk_generated_set_parent,
	.set_rate = clk_generated_set_rate,
};
209
210
211
212
213
214
215
216
217
218
219static void clk_generated_startup(struct clk_generated *gck)
220{
221 u32 tmp;
222 unsigned long flags;
223
224 spin_lock_irqsave(gck->lock, flags);
225 regmap_write(gck->regmap, AT91_PMC_PCR,
226 (gck->id & AT91_PMC_PCR_PID_MASK));
227 regmap_read(gck->regmap, AT91_PMC_PCR, &tmp);
228 spin_unlock_irqrestore(gck->lock, flags);
229
230 gck->parent_id = (tmp & AT91_PMC_PCR_GCKCSS_MASK)
231 >> AT91_PMC_PCR_GCKCSS_OFFSET;
232 gck->gckdiv = (tmp & AT91_PMC_PCR_GCKDIV_MASK)
233 >> AT91_PMC_PCR_GCKDIV_OFFSET;
234}
235
236static struct clk_hw * __init
237at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
238 const char *name, const char **parent_names,
239 u8 num_parents, u8 id,
240 const struct clk_range *range)
241{
242 struct clk_generated *gck;
243 struct clk_init_data init;
244 struct clk_hw *hw;
245 int ret;
246
247 gck = kzalloc(sizeof(*gck), GFP_KERNEL);
248 if (!gck)
249 return ERR_PTR(-ENOMEM);
250
251 init.name = name;
252 init.ops = &generated_ops;
253 init.parent_names = parent_names;
254 init.num_parents = num_parents;
255 init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
256
257 gck->id = id;
258 gck->hw.init = &init;
259 gck->regmap = regmap;
260 gck->lock = lock;
261 gck->range = *range;
262
263 hw = &gck->hw;
264 ret = clk_hw_register(NULL, &gck->hw);
265 if (ret) {
266 kfree(gck);
267 hw = ERR_PTR(ret);
268 } else
269 clk_generated_startup(gck);
270
271 return hw;
272}
273
274static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
275{
276 int num;
277 u32 id;
278 const char *name;
279 struct clk_hw *hw;
280 unsigned int num_parents;
281 const char *parent_names[GENERATED_SOURCE_MAX];
282 struct device_node *gcknp;
283 struct clk_range range = CLK_RANGE(0, 0);
284 struct regmap *regmap;
285
286 num_parents = of_clk_get_parent_count(np);
287 if (num_parents == 0 || num_parents > GENERATED_SOURCE_MAX)
288 return;
289
290 of_clk_parent_fill(np, parent_names, num_parents);
291
292 num = of_get_child_count(np);
293 if (!num || num > PERIPHERAL_MAX)
294 return;
295
296 regmap = syscon_node_to_regmap(of_get_parent(np));
297 if (IS_ERR(regmap))
298 return;
299
300 for_each_child_of_node(np, gcknp) {
301 if (of_property_read_u32(gcknp, "reg", &id))
302 continue;
303
304 if (id < PERIPHERAL_ID_MIN || id >= PERIPHERAL_MAX)
305 continue;
306
307 if (of_property_read_string(np, "clock-output-names", &name))
308 name = gcknp->name;
309
310 of_at91_get_clk_range(gcknp, "atmel,clk-output-range",
311 &range);
312
313 hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, name,
314 parent_names, num_parents,
315 id, &range);
316 if (IS_ERR(hw))
317 continue;
318
319 of_clk_add_hw_provider(gcknp, of_clk_hw_simple_get, hw);
320 }
321}
322CLK_OF_DECLARE(of_sama5d2_clk_generated_setup, "atmel,sama5d2-clk-generated",
323 of_sama5d2_clk_generated_setup);
324