// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx 'Clocking Wizard' driver
 *
 * Sören Brinkmann <soren.brinkmann@xilinx.com>
 */
#include <linux/bitfield.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/iopoll.h>

#define WZRD_NUM_OUTPUTS		7
#define WZRD_ACLK_MAX_FREQ		250000000UL

#define WZRD_CLK_CFG_REG(n)		(0x200 + 4 * (n))

#define WZRD_CLKOUT0_FRAC_EN		BIT(18)
#define WZRD_CLKFBOUT_FRAC_EN		BIT(26)

#define WZRD_CLKFBOUT_MULT_SHIFT	8
#define WZRD_CLKFBOUT_MULT_MASK		GENMASK(15, 8)
#define WZRD_CLKFBOUT_FRAC_SHIFT	16
#define WZRD_CLKFBOUT_FRAC_MASK		(0x3ff << WZRD_CLKFBOUT_FRAC_SHIFT)
#define WZRD_DIVCLK_DIVIDE_SHIFT	0
#define WZRD_DIVCLK_DIVIDE_MASK		GENMASK(7, 0)
#define WZRD_CLKOUT_DIVIDE_SHIFT	0
#define WZRD_CLKOUT_DIVIDE_WIDTH	8
#define WZRD_CLKOUT_DIVIDE_MASK		GENMASK(7, 0)
#define WZRD_CLKOUT_FRAC_SHIFT		8
#define WZRD_CLKOUT_FRAC_MASK		0x3ff
#define WZRD_CLKOUT0_FRAC_MASK		GENMASK(17, 8)

#define WZRD_DR_MAX_INT_DIV_VALUE	255
#define WZRD_DR_STATUS_REG_OFFSET	0x04
#define WZRD_DR_LOCK_BIT_MASK		0x00000001
#define WZRD_DR_INIT_REG_OFFSET		0x25C
#define WZRD_DR_DIV_TO_PHASE_OFFSET	4
#define WZRD_DR_BEGIN_DYNA_RECONF	0x03
#define WZRD_DR_BEGIN_DYNA_RECONF_5_2	0x07
#define WZRD_DR_BEGIN_DYNA_RECONF1_5_2	0x02

#define WZRD_USEC_POLL			10
#define WZRD_TIMEOUT_POLL		1000

/* Divider type: output divider only, or full M/D/O reconfiguration */
#define DIV_O				0x01
#define DIV_ALL				0x03

/* Limits of the multiplier (M), dividers (D, O) and VCO used by the rate search */
#define WZRD_M_MIN			2
#define WZRD_M_MAX			128
#define WZRD_D_MIN			1
#define WZRD_D_MAX			106
#define WZRD_VCO_MIN			800000000
#define WZRD_VCO_MAX			1600000000
#define WZRD_O_MIN			1
#define WZRD_O_MAX			128
#define WZRD_MIN_ERR			20000
#define WZRD_FRAC_POINTS		1000

#define div_mask(width)			((1 << (width)) - 1)

#define to_clk_wzrd_divider(_hw) container_of(_hw, struct clk_wzrd_divider, hw)

enum clk_wzrd_int_clks {
	wzrd_clk_mul = 0,
	wzrd_clk_mul_div = 1,
	wzrd_clk_mul_frac = 2,
	wzrd_clk_int_max = 3,
};

/**
 * struct clk_wzrd - Clocking Wizard private data structure
 * @clk_data:		Clock data
 * @nb:			Notifier block
 * @base:		Memory base
 * @clk_in1:		Handle to input clock 'clk_in1'
 * @axi_clk:		Handle to input clock 's_axi_aclk'
 * @clks_internal:	Internal clocks
 * @clkout:		Output clocks
 * @speed_grade:	Speed grade of the device
 * @suspended:		Flag indicating power state of the device
 */
struct clk_wzrd {
	struct clk_onecell_data clk_data;
	struct notifier_block nb;
	void __iomem *base;
	struct clk *clk_in1;
	struct clk *axi_clk;
	struct clk *clks_internal[wzrd_clk_int_max];
	struct clk *clkout[WZRD_NUM_OUTPUTS];
	unsigned int speed_grade;
	bool suspended;
};

/**
 * struct clk_wzrd_divider - clock divider specific to clk_wzrd
 * @hw:		handle between common and hardware-specific interfaces
 * @base:	base address of the register space
 * @offset:	offset of the register containing the divider
 * @shift:	shift to the divider bit field
 * @width:	width of the divider bit field
 * @flags:	clk_wzrd divider flags
 * @table:	array of value/divider pairs, last entry should have div = 0
 * @valuem:	value of the feedback multiplier (M)
 * @valued:	value of the common input divider (D)
 * @valueo:	value of the leaf output divider (O)
 * @lock:	register lock
 */
struct clk_wzrd_divider {
	struct clk_hw hw;
	void __iomem *base;
	u16 offset;
	u8 shift;
	u8 width;
	u8 flags;
	const struct clk_div_table *table;
	u32 valuem;
	u32 valued;
	u32 valueo;
	spinlock_t *lock;
};

#define to_clk_wzrd(_nb) container_of(_nb, struct clk_wzrd, nb)

/* maximum frequency of the input clock per speed grade */
static const unsigned long clk_wzrd_max_freq[] = {
	800000000UL,
	933000000UL,
	1066000000UL
};

/* spin lock variable for clk_wzrd */
static DEFINE_SPINLOCK(clkwzrd_lock);

static unsigned long clk_wzrd_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr = divider->base + divider->offset;
	unsigned int val;

	val = readl(div_addr) >> divider->shift;
	val &= div_mask(divider->width);

	return divider_recalc_rate(hw, parent_rate, val, divider->table,
				   divider->flags, divider->width);
}

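/*
 * Reprogramming a divider goes through the wizard's dynamic reconfiguration
 * interface: write the new divide value (and a zero phase offset), wait for
 * the status register to report lock, start the reconfiguration through the
 * init register, then wait for lock again.
 */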
static int clk_wzrd_dynamic_reconfig(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	int err = 0;
	u32 value;
	unsigned long flags = 0;
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr = divider->base + divider->offset;

	if (divider->lock)
		spin_lock_irqsave(divider->lock, flags);
	else
		__acquire(divider->lock);

	value = DIV_ROUND_CLOSEST(parent_rate, rate);

	/* Cap the divider value at the register maximum */
	value = min_t(u32, value, WZRD_DR_MAX_INT_DIV_VALUE);

	/* Set divisor and clear phase offset */
	writel(value, div_addr);
	writel(0x00, div_addr + WZRD_DR_DIV_TO_PHASE_OFFSET);

	/* Check status register */
	err = readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET,
				 value, value & WZRD_DR_LOCK_BIT_MASK,
				 WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
	if (err)
		goto err_reconfig;

	/* Initiate reconfiguration */
	writel(WZRD_DR_BEGIN_DYNA_RECONF_5_2,
	       divider->base + WZRD_DR_INIT_REG_OFFSET);
	writel(WZRD_DR_BEGIN_DYNA_RECONF1_5_2,
	       divider->base + WZRD_DR_INIT_REG_OFFSET);

	/* Check status register */
	err = readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET,
				 value, value & WZRD_DR_LOCK_BIT_MASK,
				 WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
err_reconfig:
	if (divider->lock)
		spin_unlock_irqrestore(divider->lock, flags);
	else
		__release(divider->lock);

	return err;
}

static long clk_wzrd_round_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long *prate)
{
	u8 div;

	/*
	 * since we don't change the parent rate, just round to the closest
	 * achievable integer divider
	 */
	div = DIV_ROUND_CLOSEST(*prate, rate);

	return *prate / div;
}

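/*
 * The wizard's output frequency is fout = fin * M / (D * O), where M is the
 * feedback multiplier, D the common input divider and O the per-output
 * divider.  The search below keeps the VCO frequency (fin * M / D) within its
 * allowed window and picks the (M, D, O) triple whose resulting rate is
 * closest to the requested one.
 */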
static int clk_wzrd_get_divisors(struct clk_hw *hw, unsigned long rate,
				 unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	u64 vco_freq, freq, diff;
	u64 diff2 = U64_MAX;
	u32 m, d, o;

	for (m = WZRD_M_MIN; m <= WZRD_M_MAX; m++) {
		for (d = WZRD_D_MIN; d <= WZRD_D_MAX; d++) {
			vco_freq = DIV_ROUND_CLOSEST((parent_rate * m), d);
			if (vco_freq >= WZRD_VCO_MIN && vco_freq <= WZRD_VCO_MAX) {
				for (o = WZRD_O_MIN; o <= WZRD_O_MAX; o++) {
					freq = DIV_ROUND_CLOSEST(vco_freq, o);
					diff = abs(freq - rate);

					/* Close enough: stop searching */
					if (diff < WZRD_MIN_ERR) {
						divider->valuem = m;
						divider->valued = d;
						divider->valueo = o;
						return 0;
					}
					/* Remember the closest match so far */
					if (diff < diff2) {
						divider->valuem = m;
						divider->valued = d;
						divider->valueo = o;
						diff2 = diff;
					}
				}
			}
		}
	}
	return -EBUSY;
}

static int clk_wzrd_dynamic_all_nolock(struct clk_hw *hw, unsigned long rate,
				       unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	u32 reg, pre;
	u32 value;
	int err;
	u64 vco_freq, rate_div, f, clockout0_div;

	err = clk_wzrd_get_divisors(hw, rate, parent_rate);
	if (err)
		pr_err("failed to get divisors\n");

	vco_freq = DIV_ROUND_CLOSEST((parent_rate * divider->valuem), divider->valued);
	rate_div = DIV_ROUND_CLOSEST((vco_freq * WZRD_FRAC_POINTS), rate);

	clockout0_div = rate_div / WZRD_FRAC_POINTS;

	pre = DIV_ROUND_CLOSEST((vco_freq * WZRD_FRAC_POINTS), rate);
	f = (u32)(pre - (clockout0_div * WZRD_FRAC_POINTS));
	f = f & WZRD_CLKOUT_FRAC_MASK;

	/* Program CLKOUT0 integer and fractional divide */
	reg = FIELD_PREP(WZRD_CLKOUT_DIVIDE_MASK, clockout0_div) |
	      FIELD_PREP(WZRD_CLKOUT0_FRAC_MASK, f);
	writel(reg, divider->base + WZRD_CLK_CFG_REG(2));

	/* Program the feedback multiplier (M) and input divider (D) */
	reg = FIELD_PREP(WZRD_CLKFBOUT_MULT_MASK, divider->valuem) |
	      FIELD_PREP(WZRD_DIVCLK_DIVIDE_MASK, divider->valued);
	writel(reg, divider->base + WZRD_CLK_CFG_REG(0));
	writel(divider->valueo, divider->base + WZRD_CLK_CFG_REG(2));
	writel(0, divider->base + WZRD_CLK_CFG_REG(3));

	/* Check status register */
	err = readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
				 value & WZRD_DR_LOCK_BIT_MASK,
				 WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
	if (err)
		return -ETIMEDOUT;

	/* Initiate reconfiguration */
	writel(WZRD_DR_BEGIN_DYNA_RECONF,
	       divider->base + WZRD_DR_INIT_REG_OFFSET);

	/* Check status register */
	err = readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
				 value & WZRD_DR_LOCK_BIT_MASK,
				 WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
	if (err)
		return -ETIMEDOUT;

	return 0;
}

static int clk_wzrd_dynamic_all(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	unsigned long flags = 0;
	int ret;

	if (divider->lock)
		spin_lock_irqsave(divider->lock, flags);
	else
		__acquire(divider->lock);

	ret = clk_wzrd_dynamic_all_nolock(hw, rate, parent_rate);

	if (divider->lock)
		spin_unlock_irqrestore(divider->lock, flags);
	else
		__release(divider->lock);

	return ret;
}

static unsigned long clk_wzrd_recalc_rate_all(struct clk_hw *hw,
					      unsigned long parent_rate)
{
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	u32 m, d, o, div, reg, f;

	reg = readl(divider->base + WZRD_CLK_CFG_REG(0));
	d = FIELD_GET(WZRD_DIVCLK_DIVIDE_MASK, reg);
	m = FIELD_GET(WZRD_CLKFBOUT_MULT_MASK, reg);
	reg = readl(divider->base + WZRD_CLK_CFG_REG(2));
	o = FIELD_GET(WZRD_DIVCLK_DIVIDE_MASK, reg);
	f = FIELD_GET(WZRD_CLKOUT0_FRAC_MASK, reg);

	div = DIV_ROUND_CLOSEST(d * (WZRD_FRAC_POINTS * o + f), WZRD_FRAC_POINTS);
	return divider_recalc_rate(hw, parent_rate * m, div, divider->table,
				   divider->flags, divider->width);
}

static long clk_wzrd_round_rate_all(struct clk_hw *hw, unsigned long rate,
				    unsigned long *prate)
{
	return rate;
}

static const struct clk_ops clk_wzrd_clk_divider_ops = {
	.round_rate = clk_wzrd_round_rate,
	.set_rate = clk_wzrd_dynamic_reconfig,
	.recalc_rate = clk_wzrd_recalc_rate,
};

static const struct clk_ops clk_wzrd_clk_div_all_ops = {
	.round_rate = clk_wzrd_round_rate_all,
	.set_rate = clk_wzrd_dynamic_all,
	.recalc_rate = clk_wzrd_recalc_rate_all,
};

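/*
 * CLKOUT0 supports a fractional divider: the integer part sits in bits 7:0 of
 * the configuration register and the fractional part, in units of 1/1000, in
 * bits 17:8, so the effective rate is parent_rate * 1000 / (div * 1000 + frac).
 */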
static unsigned long clk_wzrd_recalc_ratef(struct clk_hw *hw,
					   unsigned long parent_rate)
{
	unsigned int val;
	u32 div, frac;
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr = divider->base + divider->offset;

	val = readl(div_addr);
	div = val & div_mask(divider->width);
	frac = (val >> WZRD_CLKOUT_FRAC_SHIFT) & WZRD_CLKOUT_FRAC_MASK;

	return mult_frac(parent_rate, 1000, (div * 1000) + frac);
}

static int clk_wzrd_dynamic_reconfig_f(struct clk_hw *hw, unsigned long rate,
				       unsigned long parent_rate)
{
	int err = 0;
	u32 value, pre;
	unsigned long rate_div, f, clockout0_div;
	struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
	void __iomem *div_addr = divider->base + divider->offset;

	rate_div = DIV_ROUND_DOWN_ULL(parent_rate * 1000, rate);
	clockout0_div = rate_div / 1000;

	pre = DIV_ROUND_CLOSEST((parent_rate * 1000), rate);
	f = (u32)(pre - (clockout0_div * 1000));
	f = f & WZRD_CLKOUT_FRAC_MASK;
	f = f << WZRD_CLKOUT_DIVIDE_WIDTH;

	value = (f | (clockout0_div & WZRD_CLKOUT_DIVIDE_MASK));

	/* Set divisor and clear phase offset */
	writel(value, div_addr);
	writel(0x0, div_addr + WZRD_DR_DIV_TO_PHASE_OFFSET);

	/* Check status register */
	err = readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
				 value & WZRD_DR_LOCK_BIT_MASK,
				 WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
	if (err)
		return err;

	/* Initiate reconfiguration */
	writel(WZRD_DR_BEGIN_DYNA_RECONF_5_2,
	       divider->base + WZRD_DR_INIT_REG_OFFSET);
	writel(WZRD_DR_BEGIN_DYNA_RECONF1_5_2,
	       divider->base + WZRD_DR_INIT_REG_OFFSET);

	/* Check status register */
	return readl_poll_timeout(divider->base + WZRD_DR_STATUS_REG_OFFSET, value,
				  value & WZRD_DR_LOCK_BIT_MASK,
				  WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
}

static long clk_wzrd_round_rate_f(struct clk_hw *hw, unsigned long rate,
				  unsigned long *prate)
{
	return rate;
}

static const struct clk_ops clk_wzrd_clk_divider_ops_f = {
	.round_rate = clk_wzrd_round_rate_f,
	.set_rate = clk_wzrd_dynamic_reconfig_f,
	.recalc_rate = clk_wzrd_recalc_ratef,
};

static struct clk *clk_wzrd_register_divf(struct device *dev,
					  const char *name,
					  const char *parent_name,
					  unsigned long flags,
					  void __iomem *base, u16 offset,
					  u8 shift, u8 width,
					  u8 clk_divider_flags,
					  u32 div_type,
					  spinlock_t *lock)
{
	struct clk_wzrd_divider *div;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;

	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &clk_wzrd_clk_divider_ops_f;
	init.flags = flags;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	div->base = base;
	div->offset = offset;
	div->shift = shift;
	div->width = width;
	div->flags = clk_divider_flags;
	div->lock = lock;
	div->hw.init = &init;
	div->table = NULL;

	hw = &div->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	return hw->clk;
}

static struct clk *clk_wzrd_register_divider(struct device *dev,
					     const char *name,
					     const char *parent_name,
					     unsigned long flags,
					     void __iomem *base, u16 offset,
					     u8 shift, u8 width,
					     u8 clk_divider_flags,
					     u32 div_type,
					     spinlock_t *lock)
{
	struct clk_wzrd_divider *div;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;

	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
		init.ops = &clk_divider_ro_ops;
	else if (div_type == DIV_O)
		init.ops = &clk_wzrd_clk_divider_ops;
	else
		init.ops = &clk_wzrd_clk_div_all_ops;
	init.flags = flags;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	div->base = base;
	div->offset = offset;
	div->shift = shift;
	div->width = width;
	div->flags = clk_divider_flags;
	div->lock = lock;
	div->hw.init = &init;
	div->table = NULL;

	hw = &div->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	return hw->clk;
}

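/*
 * The rate-change notifier guards the two input clocks: a change is vetoed
 * when clk_in1 would exceed the maximum for the configured speed grade or
 * when s_axi_aclk would exceed WZRD_ACLK_MAX_FREQ.
 */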
static int clk_wzrd_clk_notifier(struct notifier_block *nb, unsigned long event,
				 void *data)
{
	unsigned long max;
	struct clk_notifier_data *ndata = data;
	struct clk_wzrd *clk_wzrd = to_clk_wzrd(nb);

	if (clk_wzrd->suspended)
		return NOTIFY_OK;

	if (ndata->clk == clk_wzrd->clk_in1)
		max = clk_wzrd_max_freq[clk_wzrd->speed_grade - 1];
	else if (ndata->clk == clk_wzrd->axi_clk)
		max = WZRD_ACLK_MAX_FREQ;
	else
		return NOTIFY_DONE;

	switch (event) {
	case PRE_RATE_CHANGE:
		if (ndata->new_rate > max)
			return NOTIFY_BAD;
		return NOTIFY_OK;
	case POST_RATE_CHANGE:
	case ABORT_RATE_CHANGE:
	default:
		return NOTIFY_DONE;
	}
}

static int __maybe_unused clk_wzrd_suspend(struct device *dev)
{
	struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);

	clk_disable_unprepare(clk_wzrd->axi_clk);
	clk_wzrd->suspended = true;

	return 0;
}

static int __maybe_unused clk_wzrd_resume(struct device *dev)
{
	int ret;
	struct clk_wzrd *clk_wzrd = dev_get_drvdata(dev);

	ret = clk_prepare_enable(clk_wzrd->axi_clk);
	if (ret) {
		dev_err(dev, "unable to enable s_axi_aclk\n");
		return ret;
	}

	clk_wzrd->suspended = false;

	return 0;
}

static SIMPLE_DEV_PM_OPS(clk_wzrd_dev_pm_ops, clk_wzrd_suspend,
			 clk_wzrd_resume);

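/*
 * Probe maps the registers and claims clk_in1 and s_axi_aclk.  With a single
 * output it registers one fully reconfigurable divider (M, D and O together);
 * with several outputs it models the wizard as a fixed-factor multiplier, an
 * internal divider and one divider per output.  The resulting clocks are
 * exposed through a onecell provider, and rate notifiers are registered when
 * a speed grade is given.
 */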
static int clk_wzrd_probe(struct platform_device *pdev)
{
	int i, ret;
	u32 reg, reg_f, mult;
	unsigned long rate;
	const char *clk_name;
	void __iomem *ctrl_reg;
	struct clk_wzrd *clk_wzrd;
	int outputs;
	unsigned long flags = 0;
	const char *clkout_name;
	struct device_node *np = pdev->dev.of_node;

	clk_wzrd = devm_kzalloc(&pdev->dev, sizeof(*clk_wzrd), GFP_KERNEL);
	if (!clk_wzrd)
		return -ENOMEM;
	platform_set_drvdata(pdev, clk_wzrd);

	clk_wzrd->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(clk_wzrd->base))
		return PTR_ERR(clk_wzrd->base);

	ret = of_property_read_u32(np, "xlnx,speed-grade", &clk_wzrd->speed_grade);
	if (!ret) {
		if (clk_wzrd->speed_grade < 1 || clk_wzrd->speed_grade > 3) {
			dev_warn(&pdev->dev, "invalid speed grade '%d'\n",
				 clk_wzrd->speed_grade);
			clk_wzrd->speed_grade = 0;
		}
	}

	clk_wzrd->clk_in1 = devm_clk_get(&pdev->dev, "clk_in1");
	if (IS_ERR(clk_wzrd->clk_in1)) {
		if (clk_wzrd->clk_in1 != ERR_PTR(-EPROBE_DEFER))
			dev_err(&pdev->dev, "clk_in1 not found\n");
		return PTR_ERR(clk_wzrd->clk_in1);
	}

	clk_wzrd->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
	if (IS_ERR(clk_wzrd->axi_clk)) {
		if (clk_wzrd->axi_clk != ERR_PTR(-EPROBE_DEFER))
			dev_err(&pdev->dev, "s_axi_aclk not found\n");
		return PTR_ERR(clk_wzrd->axi_clk);
	}
	ret = clk_prepare_enable(clk_wzrd->axi_clk);
	if (ret) {
		dev_err(&pdev->dev, "enabling s_axi_aclk failed\n");
		return ret;
	}
	rate = clk_get_rate(clk_wzrd->axi_clk);
	if (rate > WZRD_ACLK_MAX_FREQ) {
		dev_err(&pdev->dev, "s_axi_aclk frequency (%lu) too high\n",
			rate);
		ret = -EINVAL;
		goto err_disable_clk;
	}

	outputs = of_property_count_strings(np, "clock-output-names");
	clk_name = kasprintf(GFP_KERNEL, "%s_mul_div", dev_name(&pdev->dev));
	if (!clk_name) {
		ret = -ENOMEM;
		goto err_rm_int_clk;
	}

	/* Single output: register one reconfigurable divider fed by clk_in1 */
	if (outputs == 1) {
		if (of_property_read_string_index(np, "clock-output-names", 0,
						  &clkout_name)) {
			dev_err(&pdev->dev,
				"clock output name not specified\n");
			ret = -EINVAL;
			goto err_rm_int_clks;
		}

		clk_wzrd->clkout[0] = clk_wzrd_register_divider
				(&pdev->dev, clkout_name,
				 __clk_get_name(clk_wzrd->clk_in1), 0,
				 clk_wzrd->base, WZRD_CLK_CFG_REG(3),
				 WZRD_CLKOUT_DIVIDE_SHIFT,
				 WZRD_CLKOUT_DIVIDE_WIDTH,
				 CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
				 DIV_ALL, &clkwzrd_lock);

		goto out;
	}

	/* Register the feedback multiplier as a fixed-factor clock */
	reg = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
		     WZRD_CLKFBOUT_MULT_MASK) >> WZRD_CLKFBOUT_MULT_SHIFT;
	reg_f = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
		     WZRD_CLKFBOUT_FRAC_MASK) >> WZRD_CLKFBOUT_FRAC_SHIFT;

	mult = (reg * 1000) + reg_f;
	clk_name = kasprintf(GFP_KERNEL, "%s_mul", dev_name(&pdev->dev));
	if (!clk_name) {
		ret = -ENOMEM;
		goto err_disable_clk;
	}
	clk_wzrd->clks_internal[wzrd_clk_mul] = clk_register_fixed_factor
			(&pdev->dev, clk_name,
			 __clk_get_name(clk_wzrd->clk_in1),
			 0, mult, 1000);
	kfree(clk_name);
	if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul])) {
		dev_err(&pdev->dev, "unable to register fixed-factor clock\n");
		ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul]);
		goto err_disable_clk;
	}

	clk_name = kasprintf(GFP_KERNEL, "%s_mul_div", dev_name(&pdev->dev));
	if (!clk_name) {
		ret = -ENOMEM;
		goto err_rm_int_clk;
	}

	/* Register the common divider (D) between the multiplier and the outputs */
	ctrl_reg = clk_wzrd->base + WZRD_CLK_CFG_REG(0);

	clk_wzrd->clks_internal[wzrd_clk_mul_div] = clk_register_divider
			(&pdev->dev, clk_name,
			 __clk_get_name(clk_wzrd->clks_internal[wzrd_clk_mul]),
			 flags, ctrl_reg, 0, 8, CLK_DIVIDER_ONE_BASED |
			 CLK_DIVIDER_ALLOW_ZERO, &clkwzrd_lock);
	if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div])) {
		dev_err(&pdev->dev, "unable to register divider clock\n");
		ret = PTR_ERR(clk_wzrd->clks_internal[wzrd_clk_mul_div]);
		goto err_rm_int_clk;
	}

	/* Register one divider clock per output */
	for (i = outputs - 1; i >= 0; i--) {
		if (of_property_read_string_index(np, "clock-output-names", i,
						  &clkout_name)) {
			dev_err(&pdev->dev,
				"clock output name not specified\n");
			ret = -EINVAL;
			goto err_rm_int_clks;
		}
		if (!i)
			clk_wzrd->clkout[i] = clk_wzrd_register_divf
				(&pdev->dev, clkout_name,
				 clk_name, flags,
				 clk_wzrd->base, (WZRD_CLK_CFG_REG(2) + i * 12),
				 WZRD_CLKOUT_DIVIDE_SHIFT,
				 WZRD_CLKOUT_DIVIDE_WIDTH,
				 CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
				 DIV_O, &clkwzrd_lock);
		else
			clk_wzrd->clkout[i] = clk_wzrd_register_divider
				(&pdev->dev, clkout_name,
				 clk_name, 0,
				 clk_wzrd->base, (WZRD_CLK_CFG_REG(2) + i * 12),
				 WZRD_CLKOUT_DIVIDE_SHIFT,
				 WZRD_CLKOUT_DIVIDE_WIDTH,
				 CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
				 DIV_O, &clkwzrd_lock);
		if (IS_ERR(clk_wzrd->clkout[i])) {
			int j;

			for (j = i + 1; j < outputs; j++)
				clk_unregister(clk_wzrd->clkout[j]);
			dev_err(&pdev->dev,
				"unable to register divider clock\n");
			ret = PTR_ERR(clk_wzrd->clkout[i]);
			goto err_rm_int_clks;
		}
	}

	kfree(clk_name);
out:
	clk_wzrd->clk_data.clks = clk_wzrd->clkout;
	clk_wzrd->clk_data.clk_num = ARRAY_SIZE(clk_wzrd->clkout);
	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_wzrd->clk_data);

	if (clk_wzrd->speed_grade) {
		clk_wzrd->nb.notifier_call = clk_wzrd_clk_notifier;

		ret = clk_notifier_register(clk_wzrd->clk_in1,
					    &clk_wzrd->nb);
		if (ret)
			dev_warn(&pdev->dev,
				 "unable to register clock notifier\n");

		ret = clk_notifier_register(clk_wzrd->axi_clk, &clk_wzrd->nb);
		if (ret)
			dev_warn(&pdev->dev,
				 "unable to register clock notifier\n");
	}

	return 0;

err_rm_int_clks:
	clk_unregister(clk_wzrd->clks_internal[wzrd_clk_mul_div]);
err_rm_int_clk:
	kfree(clk_name);
	clk_unregister(clk_wzrd->clks_internal[wzrd_clk_mul]);
err_disable_clk:
	clk_disable_unprepare(clk_wzrd->axi_clk);

	return ret;
}

static int clk_wzrd_remove(struct platform_device *pdev)
{
	int i;
	struct clk_wzrd *clk_wzrd = platform_get_drvdata(pdev);

	of_clk_del_provider(pdev->dev.of_node);

	for (i = 0; i < WZRD_NUM_OUTPUTS; i++)
		clk_unregister(clk_wzrd->clkout[i]);
	for (i = 0; i < wzrd_clk_int_max; i++)
		clk_unregister(clk_wzrd->clks_internal[i]);

	if (clk_wzrd->speed_grade) {
		clk_notifier_unregister(clk_wzrd->axi_clk, &clk_wzrd->nb);
		clk_notifier_unregister(clk_wzrd->clk_in1, &clk_wzrd->nb);
	}

	clk_disable_unprepare(clk_wzrd->axi_clk);

	return 0;
}

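/*
 * Illustrative device tree node (labels and the register address are
 * placeholders; the compatible strings, clock names and properties match
 * what this driver parses):
 *
 *	clk_wiz: clock-controller@b0000000 {
 *		compatible = "xlnx,clocking-wizard";
 *		reg = <0xb0000000 0x10000>;
 *		#clock-cells = <1>;
 *		clocks = <&clk_in1>, <&s_axi_aclk>;
 *		clock-names = "clk_in1", "s_axi_aclk";
 *		clock-output-names = "clk_out1", "clk_out2";
 *		xlnx,speed-grade = <1>;
 *	};
 */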
static const struct of_device_id clk_wzrd_ids[] = {
	{ .compatible = "xlnx,clocking-wizard" },
	{ .compatible = "xlnx,clocking-wizard-v5.2" },
	{ .compatible = "xlnx,clocking-wizard-v6.0" },
	{ },
};
MODULE_DEVICE_TABLE(of, clk_wzrd_ids);

static struct platform_driver clk_wzrd_driver = {
	.driver = {
		.name = "clk-wizard",
		.of_match_table = clk_wzrd_ids,
		.pm = &clk_wzrd_dev_pm_ops,
	},
	.probe = clk_wzrd_probe,
	.remove = clk_wzrd_remove,
};
module_platform_driver(clk_wzrd_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Soeren Brinkmann <soren.brinkmann@xilinx.com>");
MODULE_DESCRIPTION("Driver for the Xilinx Clocking Wizard IP core");