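/*
 * Clock driver for the STMicroelectronics quad frequency synthesizer
 * (QUADFS) blocks: a PLL feeding up to four digital frequency synthesizer
 * (FS660C32) output channels.
 */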
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>

#include "clkgen.h"

#define QUADFS_NDIV_THRESHOLD 30000000

/* Values written to the PLL "ref_bw" reference bandwidth filter field. */
#define PLL_BW_GOODREF		(0L)
#define PLL_BW_VBADREF		(1L)
#define PLL_BW_BADREF		(2L)
#define PLL_BW_VGOODREF		(3L)

#define QUADFS_MAX_CHAN 4

struct stm_fs {
	unsigned long ndiv;
	unsigned long mdiv;
	unsigned long pe;
	unsigned long sdiv;
	unsigned long nsdiv;
};

struct clkgen_quadfs_data {
	bool reset_present;
	bool bwfilter_present;
	bool lockstatus_present;
	bool powerup_polarity;
	bool standby_polarity;
	bool nsdiv_present;
	bool nrst_present;
	struct clkgen_field ndiv;
	struct clkgen_field ref_bw;
	struct clkgen_field nreset;
	struct clkgen_field npda;
	struct clkgen_field lock_status;

	struct clkgen_field nrst[QUADFS_MAX_CHAN];
	struct clkgen_field nsb[QUADFS_MAX_CHAN];
	struct clkgen_field en[QUADFS_MAX_CHAN];
	struct clkgen_field mdiv[QUADFS_MAX_CHAN];
	struct clkgen_field pe[QUADFS_MAX_CHAN];
	struct clkgen_field sdiv[QUADFS_MAX_CHAN];
	struct clkgen_field nsdiv[QUADFS_MAX_CHAN];

	const struct clk_ops *pll_ops;
	int (*get_params)(unsigned long, unsigned long, struct stm_fs *);
	int (*get_rate)(unsigned long, const struct stm_fs *, unsigned long *);
};

static const struct clk_ops st_quadfs_pll_c32_ops;
static const struct clk_ops st_quadfs_fs660c32_ops;

static int clk_fs660c32_dig_get_params(unsigned long input,
		unsigned long output, struct stm_fs *fs);
static int clk_fs660c32_dig_get_rate(unsigned long, const struct stm_fs *,
		unsigned long *);

static const struct clkgen_quadfs_data st_fs660c32_C = {
	.nrst_present = true,
	.nrst = { CLKGEN_FIELD(0x2f0, 0x1, 0),
		  CLKGEN_FIELD(0x2f0, 0x1, 1),
		  CLKGEN_FIELD(0x2f0, 0x1, 2),
		  CLKGEN_FIELD(0x2f0, 0x1, 3) },
	.npda = CLKGEN_FIELD(0x2f0, 0x1, 12),
	.nsb = { CLKGEN_FIELD(0x2f0, 0x1, 8),
		 CLKGEN_FIELD(0x2f0, 0x1, 9),
		 CLKGEN_FIELD(0x2f0, 0x1, 10),
		 CLKGEN_FIELD(0x2f0, 0x1, 11) },
	.nsdiv_present = true,
	.nsdiv = { CLKGEN_FIELD(0x304, 0x1, 24),
		   CLKGEN_FIELD(0x308, 0x1, 24),
		   CLKGEN_FIELD(0x30c, 0x1, 24),
		   CLKGEN_FIELD(0x310, 0x1, 24) },
	.mdiv = { CLKGEN_FIELD(0x304, 0x1f, 15),
		  CLKGEN_FIELD(0x308, 0x1f, 15),
		  CLKGEN_FIELD(0x30c, 0x1f, 15),
		  CLKGEN_FIELD(0x310, 0x1f, 15) },
	.en = { CLKGEN_FIELD(0x2fc, 0x1, 0),
		CLKGEN_FIELD(0x2fc, 0x1, 1),
		CLKGEN_FIELD(0x2fc, 0x1, 2),
		CLKGEN_FIELD(0x2fc, 0x1, 3) },
	.ndiv = CLKGEN_FIELD(0x2f4, 0x7, 16),
	.pe = { CLKGEN_FIELD(0x304, 0x7fff, 0),
		CLKGEN_FIELD(0x308, 0x7fff, 0),
		CLKGEN_FIELD(0x30c, 0x7fff, 0),
		CLKGEN_FIELD(0x310, 0x7fff, 0) },
	.sdiv = { CLKGEN_FIELD(0x304, 0xf, 20),
		  CLKGEN_FIELD(0x308, 0xf, 20),
		  CLKGEN_FIELD(0x30c, 0xf, 20),
		  CLKGEN_FIELD(0x310, 0xf, 20) },
	.lockstatus_present = true,
	.lock_status = CLKGEN_FIELD(0x2f0, 0x1, 24),
	.powerup_polarity = 1,
	.standby_polarity = 1,
	.pll_ops = &st_quadfs_pll_c32_ops,
	.get_params = clk_fs660c32_dig_get_params,
	.get_rate = clk_fs660c32_dig_get_rate,
};

static const struct clkgen_quadfs_data st_fs660c32_D = {
	.nrst_present = true,
	.nrst = { CLKGEN_FIELD(0x2a0, 0x1, 0),
		  CLKGEN_FIELD(0x2a0, 0x1, 1),
		  CLKGEN_FIELD(0x2a0, 0x1, 2),
		  CLKGEN_FIELD(0x2a0, 0x1, 3) },
	.ndiv = CLKGEN_FIELD(0x2a4, 0x7, 16),
	.pe = { CLKGEN_FIELD(0x2b4, 0x7fff, 0),
		CLKGEN_FIELD(0x2b8, 0x7fff, 0),
		CLKGEN_FIELD(0x2bc, 0x7fff, 0),
		CLKGEN_FIELD(0x2c0, 0x7fff, 0) },
	.sdiv = { CLKGEN_FIELD(0x2b4, 0xf, 20),
		  CLKGEN_FIELD(0x2b8, 0xf, 20),
		  CLKGEN_FIELD(0x2bc, 0xf, 20),
		  CLKGEN_FIELD(0x2c0, 0xf, 20) },
	.npda = CLKGEN_FIELD(0x2a0, 0x1, 12),
	.nsb = { CLKGEN_FIELD(0x2a0, 0x1, 8),
		 CLKGEN_FIELD(0x2a0, 0x1, 9),
		 CLKGEN_FIELD(0x2a0, 0x1, 10),
		 CLKGEN_FIELD(0x2a0, 0x1, 11) },
	.nsdiv_present = true,
	.nsdiv = { CLKGEN_FIELD(0x2b4, 0x1, 24),
		   CLKGEN_FIELD(0x2b8, 0x1, 24),
		   CLKGEN_FIELD(0x2bc, 0x1, 24),
		   CLKGEN_FIELD(0x2c0, 0x1, 24) },
	.mdiv = { CLKGEN_FIELD(0x2b4, 0x1f, 15),
		  CLKGEN_FIELD(0x2b8, 0x1f, 15),
		  CLKGEN_FIELD(0x2bc, 0x1f, 15),
		  CLKGEN_FIELD(0x2c0, 0x1f, 15) },
	.en = { CLKGEN_FIELD(0x2ac, 0x1, 0),
		CLKGEN_FIELD(0x2ac, 0x1, 1),
		CLKGEN_FIELD(0x2ac, 0x1, 2),
		CLKGEN_FIELD(0x2ac, 0x1, 3) },
	.lockstatus_present = true,
	.lock_status = CLKGEN_FIELD(0x2a0, 0x1, 24),
	.powerup_polarity = 1,
	.standby_polarity = 1,
	.pll_ops = &st_quadfs_pll_c32_ops,
	.get_params = clk_fs660c32_dig_get_params,
	.get_rate = clk_fs660c32_dig_get_rate,
};

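/*
 * struct st_clk_quadfs_pll - the PLL embedded in a quad frequency
 * synthesizer block, multiplying its parent clock for the fsynth channels.
 *
 * @hw:        handle between common and hardware-specific interfaces
 * @regs_base: base address of the configuration registers
 * @lock:      spinlock protecting register accesses
 * @data:      register field layout for this PLL variant
 * @ndiv:      cached value of the feedback divider control field
 */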
struct st_clk_quadfs_pll {
	struct clk_hw hw;
	void __iomem *regs_base;
	spinlock_t *lock;
	struct clkgen_quadfs_data *data;
	u32 ndiv;
};

#define to_quadfs_pll(_hw) container_of(_hw, struct st_clk_quadfs_pll, hw)

static int quadfs_pll_enable(struct clk_hw *hw)
{
	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
	unsigned long flags = 0, timeout = jiffies + msecs_to_jiffies(10);

	if (pll->lock)
		spin_lock_irqsave(pll->lock, flags);

	/* Bring the block out of reset if we have reset control. */
	if (pll->data->reset_present)
		CLKGEN_WRITE(pll, nreset, 1);

	/* Select the reference bandwidth filter setting. */
	if (pll->data->bwfilter_present)
		CLKGEN_WRITE(pll, ref_bw, PLL_BW_GOODREF);

	/* Program the cached feedback divider. */
	CLKGEN_WRITE(pll, ndiv, pll->ndiv);

	/* Power up the PLL, taking the powerup polarity into account. */
	CLKGEN_WRITE(pll, npda, !pll->data->powerup_polarity);

	if (pll->lock)
		spin_unlock_irqrestore(pll->lock, flags);

	/* Wait for the PLL to lock if a lock status bit is available. */
	if (pll->data->lockstatus_present)
		while (!CLKGEN_READ(pll, lock_status)) {
			if (time_after(jiffies, timeout))
				return -ETIMEDOUT;
			cpu_relax();
		}

	return 0;
}

static void quadfs_pll_disable(struct clk_hw *hw)
{
	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
	unsigned long flags = 0;

	if (pll->lock)
		spin_lock_irqsave(pll->lock, flags);

	/*
	 * Power down the PLL and then put the block back into soft reset
	 * if we have reset control.
	 */
	CLKGEN_WRITE(pll, npda, pll->data->powerup_polarity);

	if (pll->data->reset_present)
		CLKGEN_WRITE(pll, nreset, 0);

	if (pll->lock)
		spin_unlock_irqrestore(pll->lock, flags);
}

static int quadfs_pll_is_enabled(struct clk_hw *hw)
{
	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
	u32 npda = CLKGEN_READ(pll, npda);

	return pll->data->powerup_polarity ? !npda : !!npda;
}

static int clk_fs660c32_vco_get_rate(unsigned long input, struct stm_fs *fs,
		unsigned long *rate)
{
	/* The FS660C32 VCO runs at fin * (ndiv + 16). */
	unsigned long nd = fs->ndiv + 16;

	*rate = input * nd;

	return 0;
}

static unsigned long quadfs_pll_fs660c32_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
	unsigned long rate = 0;
	struct stm_fs params;

	params.ndiv = CLKGEN_READ(pll, ndiv);
	if (clk_fs660c32_vco_get_rate(parent_rate, &params, &rate))
		pr_err("%s:%s error calculating rate\n",
		       clk_hw_get_name(hw), __func__);

	pll->ndiv = params.ndiv;

	return rate;
}

static int clk_fs660c32_vco_get_params(unsigned long input,
		unsigned long output, struct stm_fs *fs)
{
	/*
	 * The VCO frequency is fin * ndiv / pdiv, so the required multiplier
	 * is ndiv = output * pdiv / input (stored in hardware as ndiv - 16).
	 */
	unsigned long pdiv = 1, n;

	/* VCO output range supported by the fs660c32. */
	if (output < 384000000 || output > 660000000)
		return -EINVAL;

	/* Only a pre-divider of 1 is handled here. */
	if (input > 40000000)
		return -EINVAL;

	input /= 1000;
	output /= 1000;

	n = output * pdiv / input;
	if (n < 16)
		n = 16;
	fs->ndiv = n - 16;

	return 0;
}

static long quadfs_pll_fs660c32_round_rate(struct clk_hw *hw,
		unsigned long rate, unsigned long *prate)
{
	struct stm_fs params;

	if (clk_fs660c32_vco_get_params(*prate, rate, &params))
		return rate;

	clk_fs660c32_vco_get_rate(*prate, &params, &rate);

	pr_debug("%s: %s new rate %ld [ndiv=%u]\n",
		 __func__, clk_hw_get_name(hw),
		 rate, (unsigned int)params.ndiv);

	return rate;
}

static int quadfs_pll_fs660c32_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
	struct stm_fs params;
	unsigned long hwrate = 0;
	unsigned long flags = 0;
	int ret;

	if (!rate || !parent_rate)
		return -EINVAL;

	ret = clk_fs660c32_vco_get_params(parent_rate, rate, &params);
	if (ret)
		return ret;

	clk_fs660c32_vco_get_rate(parent_rate, &params, &hwrate);

	pr_debug("%s: %s new rate %lu [ndiv=0x%x]\n",
		 __func__, clk_hw_get_name(hw),
		 hwrate, (unsigned int)params.ndiv);

	if (!hwrate)
		return -EINVAL;

	pll->ndiv = params.ndiv;

	if (pll->lock)
		spin_lock_irqsave(pll->lock, flags);

	CLKGEN_WRITE(pll, ndiv, pll->ndiv);

	if (pll->lock)
		spin_unlock_irqrestore(pll->lock, flags);

	return 0;
}

static const struct clk_ops st_quadfs_pll_c32_ops = {
	.enable		= quadfs_pll_enable,
	.disable	= quadfs_pll_disable,
	.is_enabled	= quadfs_pll_is_enabled,
	.recalc_rate	= quadfs_pll_fs660c32_recalc_rate,
	.round_rate	= quadfs_pll_fs660c32_round_rate,
	.set_rate	= quadfs_pll_fs660c32_set_rate,
};

static struct clk * __init st_clk_register_quadfs_pll(
		const char *name, const char *parent_name,
		struct clkgen_quadfs_data *quadfs, void __iomem *reg,
		spinlock_t *lock)
{
	struct st_clk_quadfs_pll *pll;
	struct clk *clk;
	struct clk_init_data init;

	/* Sanity check the required parameters. */
	if (WARN_ON(!name || !parent_name))
		return ERR_PTR(-EINVAL);

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = quadfs->pll_ops;
	init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	pll->data = quadfs;
	pll->regs_base = reg;
	pll->lock = lock;
	pll->hw.init = &init;

	clk = clk_register(NULL, &pll->hw);

	if (IS_ERR(clk))
		kfree(pll);

	return clk;
}

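/*
 * struct st_clk_quadfs_fsynth - one output channel of a four channel
 * digital frequency synthesizer (fsynth) block.
 *
 * @hw:        handle between common and hardware-specific interfaces
 * @regs_base: base address of the fsynth configuration registers
 * @lock:      spinlock protecting register accesses, shared with the PLL
 * @data:      register field layout for this fsynth variant
 * @chan:      channel index (0..QUADFS_MAX_CHAN - 1)
 * @md, @pe, @sdiv, @nsdiv: cached divider settings for this channel,
 *             written to the hardware when the rate is programmed
 */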
struct st_clk_quadfs_fsynth {
	struct clk_hw hw;
	void __iomem *regs_base;
	spinlock_t *lock;
	struct clkgen_quadfs_data *data;

	u32 chan;

	/*
	 * Cached divider settings for this channel, programmed into the
	 * hardware when the rate is set or the channel is enabled.
	 */
	u32 md;
	u32 pe;
	u32 sdiv;
	u32 nsdiv;
};

#define to_quadfs_fsynth(_hw) \
	container_of(_hw, struct st_clk_quadfs_fsynth, hw)

static void quadfs_fsynth_program_enable(struct st_clk_quadfs_fsynth *fs)
{
	/*
	 * Pulse the channel's program enable bit so the hardware latches
	 * the newly written md/pe/sdiv values.
	 */
	CLKGEN_WRITE(fs, en[fs->chan], 1);
	CLKGEN_WRITE(fs, en[fs->chan], 0);
}

static void quadfs_fsynth_program_rate(struct st_clk_quadfs_fsynth *fs)
{
	unsigned long flags = 0;

	/*
	 * Clear the channel's program enable bit while the new divider
	 * values are written, so they only take effect when
	 * quadfs_fsynth_program_enable() pulses it afterwards.
	 */
	CLKGEN_WRITE(fs, en[fs->chan], 0);

	CLKGEN_WRITE(fs, mdiv[fs->chan], fs->md);
	CLKGEN_WRITE(fs, pe[fs->chan], fs->pe);
	CLKGEN_WRITE(fs, sdiv[fs->chan], fs->sdiv);

	if (fs->lock)
		spin_lock_irqsave(fs->lock, flags);

	if (fs->data->nsdiv_present)
		CLKGEN_WRITE(fs, nsdiv[fs->chan], fs->nsdiv);

	if (fs->lock)
		spin_unlock_irqrestore(fs->lock, flags);
}

static int quadfs_fsynth_enable(struct clk_hw *hw)
{
	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
	unsigned long flags = 0;

	pr_debug("%s: %s\n", __func__, clk_hw_get_name(hw));

	quadfs_fsynth_program_rate(fs);

	if (fs->lock)
		spin_lock_irqsave(fs->lock, flags);

	CLKGEN_WRITE(fs, nsb[fs->chan], !fs->data->standby_polarity);

	if (fs->data->nrst_present)
		CLKGEN_WRITE(fs, nrst[fs->chan], 0);

	if (fs->lock)
		spin_unlock_irqrestore(fs->lock, flags);

	quadfs_fsynth_program_enable(fs);

	return 0;
}

static void quadfs_fsynth_disable(struct clk_hw *hw)
{
	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
	unsigned long flags = 0;

	pr_debug("%s: %s\n", __func__, clk_hw_get_name(hw));

	if (fs->lock)
		spin_lock_irqsave(fs->lock, flags);

	CLKGEN_WRITE(fs, nsb[fs->chan], fs->data->standby_polarity);

	if (fs->lock)
		spin_unlock_irqrestore(fs->lock, flags);
}

static int quadfs_fsynth_is_enabled(struct clk_hw *hw)
{
	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
	u32 nsb = CLKGEN_READ(fs, nsb[fs->chan]);

	pr_debug("%s: %s enable bit = 0x%x\n",
		 __func__, clk_hw_get_name(hw), nsb);

	return fs->data->standby_polarity ? !nsb : !!nsb;
}

/* 2^20 fixed-point scale used in the fsynth frequency calculations. */
#define P20		(uint64_t)(1 << 20)

static int clk_fs660c32_dig_get_rate(unsigned long input,
		const struct stm_fs *fs, unsigned long *rate)
{
	unsigned long s = (1 << fs->sdiv);
	unsigned long ns;
	uint64_t res;

	/*
	 * The nsdiv register field selects an extra divide-by-3 stage:
	 * nsdiv = 1 means no division, nsdiv = 0 means divide by 3.
	 */
	ns = (fs->nsdiv == 1) ? 1 : 3;

	res = (P20 * (32 + fs->mdiv) + 32 * fs->pe) * s * ns;
	*rate = (unsigned long)div64_u64(input * P20 * 32, res);

	return 0;
}

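/*
 * clk_fs660c32_get_pe() - for a given mdiv (m) and sdiv (si), compute the
 * fine-tuning value "pe" that best approximates the requested output
 * frequency. If the resulting deviation improves on *deviation, the
 * candidate mdiv/pe/sdiv/nsdiv settings are recorded in *fs.
 * Returns 1 if the computed pe does not fit in the 15-bit field.
 */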
static int clk_fs660c32_get_pe(int m, int si, unsigned long *deviation,
		signed long input, unsigned long output, uint64_t *p,
		struct stm_fs *fs)
{
	unsigned long new_freq, new_deviation;
	struct stm_fs fs_tmp;
	uint64_t val;

	val = (uint64_t)output << si;

	*p = (uint64_t)input * P20 - (32LL + (uint64_t)m) * val * (P20 / 32LL);

	*p = div64_u64(*p, val);

	if (*p > 32767LL)
		return 1;

	fs_tmp.mdiv = (unsigned long)m;
	fs_tmp.pe = (unsigned long)*p;
	fs_tmp.sdiv = si;
	fs_tmp.nsdiv = 1;

	clk_fs660c32_dig_get_rate(input, &fs_tmp, &new_freq);

	new_deviation = abs(output - new_freq);

	if (new_deviation < *deviation) {
		fs->mdiv = m;
		fs->pe = (unsigned long)*p;
		fs->sdiv = si;
		fs->nsdiv = 1;
		*deviation = new_deviation;
	}
	return 0;
}

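/*
 * clk_fs660c32_dig_get_params() - search the sdiv/mdiv/pe space for the
 * divider settings closest to the requested output frequency, then
 * fine-tune pe around the best candidate. Returns -1 if no usable
 * settings were found.
 */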
static int clk_fs660c32_dig_get_params(unsigned long input,
		unsigned long output, struct stm_fs *fs)
{
	int si;
	int m;
	unsigned long new_freq, new_deviation;
	/* Start from an "infinite" deviation so any valid solution wins. */
	unsigned long deviation = ~0;
	uint64_t p, p1, p2;
	int r1, r2;

	struct stm_fs fs_tmp;

	for (si = 0; (si <= 8) && deviation; si++) {

		/* Boundary test to avoid useless iterations for this sdiv. */
		r1 = clk_fs660c32_get_pe(0, si, &deviation,
				input, output, &p1, fs);
		r2 = clk_fs660c32_get_pe(31, si, &deviation,
				input, output, &p2, fs);

		/* No mdiv in this range can produce a valid pe value. */
		if (r1 && r2 && (p1 > p2))
			continue;

		/* Scan the remaining mdiv values for the best deviation. */
		for (m = 1; (m < 31) && deviation; m++)
			clk_fs660c32_get_pe(m, si, &deviation,
					input, output, &p, fs);
	}

	if (deviation == ~0)
		return -1;

	/* Fine-tune pe around the best candidate found above. */
	if (deviation) {
		fs_tmp.mdiv = fs->mdiv;
		fs_tmp.sdiv = fs->sdiv;
		fs_tmp.nsdiv = fs->nsdiv;

		if (fs->pe > 2)
			p2 = fs->pe - 2;
		else
			p2 = 0;

		for (; p2 < 32768ll && (p2 <= (fs->pe + 2)); p2++) {
			fs_tmp.pe = (unsigned long)p2;

			clk_fs660c32_dig_get_rate(input, &fs_tmp, &new_freq);

			new_deviation = abs(output - new_freq);

			if (new_deviation < deviation) {
				fs->pe = (unsigned long)p2;
				deviation = new_deviation;
			}
		}
	}
	return 0;
}

static int quadfs_fsynt_get_hw_value_for_recalc(struct st_clk_quadfs_fsynth *fs,
		struct stm_fs *params)
{
	/* Read the current frequency parameters back from the hardware. */
	params->mdiv = CLKGEN_READ(fs, mdiv[fs->chan]);
	params->pe = CLKGEN_READ(fs, pe[fs->chan]);
	params->sdiv = CLKGEN_READ(fs, sdiv[fs->chan]);

	if (fs->data->nsdiv_present)
		params->nsdiv = CLKGEN_READ(fs, nsdiv[fs->chan]);
	else
		params->nsdiv = 1;

	/* If everything reads back as zero, no rate has been programmed. */
	if (!params->mdiv && !params->pe && !params->sdiv)
		return 1;

	fs->md = params->mdiv;
	fs->pe = params->pe;
	fs->sdiv = params->sdiv;
	fs->nsdiv = params->nsdiv;

	return 0;
}

static long quadfs_find_best_rate(struct clk_hw *hw, unsigned long drate,
		unsigned long prate, struct stm_fs *params)
{
	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
	int (*clk_fs_get_rate)(unsigned long,
			const struct stm_fs *, unsigned long *);
	int (*clk_fs_get_params)(unsigned long, unsigned long, struct stm_fs *);
	unsigned long rate = 0;

	clk_fs_get_rate = fs->data->get_rate;
	clk_fs_get_params = fs->data->get_params;

	if (!clk_fs_get_params(prate, drate, params))
		clk_fs_get_rate(prate, params, &rate);

	return rate;
}

static unsigned long quadfs_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
	unsigned long rate = 0;
	struct stm_fs params;
	int (*clk_fs_get_rate)(unsigned long,
			const struct stm_fs *, unsigned long *);

	clk_fs_get_rate = fs->data->get_rate;

	if (quadfs_fsynt_get_hw_value_for_recalc(fs, &params))
		return 0;

	if (clk_fs_get_rate(parent_rate, &params, &rate)) {
		pr_err("%s:%s error calculating rate\n",
		       clk_hw_get_name(hw), __func__);
	}

	pr_debug("%s:%s rate %lu\n", clk_hw_get_name(hw), __func__, rate);

	return rate;
}

static long quadfs_round_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long *prate)
{
	struct stm_fs params;

	rate = quadfs_find_best_rate(hw, rate, *prate, &params);

	pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n",
		 __func__, clk_hw_get_name(hw),
		 rate, (unsigned int)params.sdiv, (unsigned int)params.mdiv,
		 (unsigned int)params.pe, (unsigned int)params.nsdiv);

	return rate;
}

static void quadfs_program_and_enable(struct st_clk_quadfs_fsynth *fs,
		struct stm_fs *params)
{
	fs->md = params->mdiv;
	fs->pe = params->pe;
	fs->sdiv = params->sdiv;
	fs->nsdiv = params->nsdiv;

	/*
	 * Write the new divider values and pulse the program enable bit so
	 * the channel switches to the new rate.
	 */
	quadfs_fsynth_program_rate(fs);
	quadfs_fsynth_program_enable(fs);
}

static int quadfs_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
	struct stm_fs params;
	long hwrate;
	int uninitialized_var(i);

	if (!rate || !parent_rate)
		return -EINVAL;

	memset(&params, 0, sizeof(struct stm_fs));

	hwrate = quadfs_find_best_rate(hw, rate, parent_rate, &params);
	if (!hwrate)
		return -EINVAL;

	quadfs_program_and_enable(fs, &params);

	return 0;
}

static const struct clk_ops st_quadfs_ops = {
	.enable		= quadfs_fsynth_enable,
	.disable	= quadfs_fsynth_disable,
	.is_enabled	= quadfs_fsynth_is_enabled,
	.round_rate	= quadfs_round_rate,
	.set_rate	= quadfs_set_rate,
	.recalc_rate	= quadfs_recalc_rate,
};

static struct clk * __init st_clk_register_quadfs_fsynth(
		const char *name, const char *parent_name,
		struct clkgen_quadfs_data *quadfs, void __iomem *reg, u32 chan,
		unsigned long flags, spinlock_t *lock)
{
	struct st_clk_quadfs_fsynth *fs;
	struct clk *clk;
	struct clk_init_data init;

	/* Sanity check the required parameters. */
	if (WARN_ON(!name || !parent_name))
		return ERR_PTR(-EINVAL);

	fs = kzalloc(sizeof(*fs), GFP_KERNEL);
	if (!fs)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &st_quadfs_ops;
	init.flags = flags | CLK_GET_RATE_NOCACHE | CLK_IS_BASIC;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	fs->data = quadfs;
	fs->regs_base = reg;
	fs->chan = chan;
	fs->lock = lock;
	fs->hw.init = &init;

	clk = clk_register(NULL, &fs->hw);

	if (IS_ERR(clk))
		kfree(fs);

	return clk;
}

static void __init st_of_create_quadfs_fsynths(
		struct device_node *np, const char *pll_name,
		struct clkgen_quadfs_data *quadfs, void __iomem *reg,
		spinlock_t *lock)
{
	struct clk_onecell_data *clk_data;
	int fschan;

	clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
	if (!clk_data)
		return;

	clk_data->clk_num = QUADFS_MAX_CHAN;
	clk_data->clks = kzalloc(QUADFS_MAX_CHAN * sizeof(struct clk *),
				 GFP_KERNEL);

	if (!clk_data->clks) {
		kfree(clk_data);
		return;
	}

	for (fschan = 0; fschan < QUADFS_MAX_CHAN; fschan++) {
		struct clk *clk;
		const char *clk_name;
		unsigned long flags = 0;

		if (of_property_read_string_index(np, "clock-output-names",
						  fschan, &clk_name)) {
			break;
		}

		/* An empty output name means the channel is unused. */
		if (*clk_name == '\0')
			continue;

		of_clk_detect_critical(np, fschan, &flags);

		clk = st_clk_register_quadfs_fsynth(clk_name, pll_name,
						    quadfs, reg, fschan,
						    flags, lock);

		/*
		 * Only record channels that registered successfully; a
		 * failed channel is skipped and its slot stays NULL.
		 */
		if (!IS_ERR(clk)) {
			clk_data->clks[fschan] = clk;
			pr_debug("%s: parent %s rate %u\n",
				 __clk_get_name(clk),
				 __clk_get_name(clk_get_parent(clk)),
				 (unsigned int)clk_get_rate(clk));
		}
	}

	of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
}

static void __init st_of_quadfs_setup(struct device_node *np,
		struct clkgen_quadfs_data *data)
{
	struct clk *clk;
	const char *pll_name, *clk_parent_name;
	void __iomem *reg;
	spinlock_t *lock;

	reg = of_iomap(np, 0);
	if (!reg)
		return;

	clk_parent_name = of_clk_get_parent_name(np, 0);
	if (!clk_parent_name)
		return;

	pll_name = kasprintf(GFP_KERNEL, "%s.pll", np->name);
	if (!pll_name)
		return;

	lock = kzalloc(sizeof(*lock), GFP_KERNEL);
	if (!lock)
		goto err_exit;

	spin_lock_init(lock);

	clk = st_clk_register_quadfs_pll(pll_name, clk_parent_name, data,
					 reg, lock);
	if (IS_ERR(clk))
		goto err_exit;
	else
		pr_debug("%s: parent %s rate %u\n",
			 __clk_get_name(clk),
			 __clk_get_name(clk_get_parent(clk)),
			 (unsigned int)clk_get_rate(clk));

	st_of_create_quadfs_fsynths(np, pll_name, data, reg, lock);

err_exit:
	kfree(pll_name);
}

static void __init st_of_quadfs660C_setup(struct device_node *np)
{
	st_of_quadfs_setup(np, (struct clkgen_quadfs_data *)&st_fs660c32_C);
}
CLK_OF_DECLARE(quadfs660C, "st,quadfs-pll", st_of_quadfs660C_setup);

static void __init st_of_quadfs660D_setup(struct device_node *np)
{
	st_of_quadfs_setup(np, (struct clkgen_quadfs_data *)&st_fs660c32_D);
}
CLK_OF_DECLARE(quadfs660D, "st,quadfs", st_of_quadfs660D_setup);