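/*
 * Clock driver for the quad frequency synthesizer (QuadFS) blocks found in
 * STMicroelectronics clockgen IP: an FS660C32 PLL feeding four digital
 * frequency synthesizer channels, described by the register layouts below.
 */
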
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>

#include "clkgen.h"

#define QUADFS_NDIV_THRESHOLD 30000000

#define PLL_BW_GOODREF (0L)
#define PLL_BW_VBADREF (1L)
#define PLL_BW_BADREF (2L)
#define PLL_BW_VGOODREF (3L)

#define QUADFS_MAX_CHAN 4

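/*
 * Raw synthesizer settings: the PLL NDIV value plus the per-channel MD, PE,
 * SDIV and NSDIV register values.
 */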
struct stm_fs {
	unsigned long ndiv;
	unsigned long mdiv;
	unsigned long pe;
	unsigned long sdiv;
	unsigned long nsdiv;
};

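/*
 * Register layout and behaviour of one QuadFS variant: which optional
 * controls exist, where each bitfield lives, the polarity of the power-up
 * and standby bits, and the ops/helpers used to compute rates.
 */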
struct clkgen_quadfs_data {
	bool reset_present;
	bool bwfilter_present;
	bool lockstatus_present;
	bool powerup_polarity;
	bool standby_polarity;
	bool nsdiv_present;
	bool nrst_present;
	struct clkgen_field ndiv;
	struct clkgen_field ref_bw;
	struct clkgen_field nreset;
	struct clkgen_field npda;
	struct clkgen_field lock_status;

	struct clkgen_field nrst[QUADFS_MAX_CHAN];
	struct clkgen_field nsb[QUADFS_MAX_CHAN];
	struct clkgen_field en[QUADFS_MAX_CHAN];
	struct clkgen_field mdiv[QUADFS_MAX_CHAN];
	struct clkgen_field pe[QUADFS_MAX_CHAN];
	struct clkgen_field sdiv[QUADFS_MAX_CHAN];
	struct clkgen_field nsdiv[QUADFS_MAX_CHAN];

	const struct clk_ops *pll_ops;
	int (*get_params)(unsigned long, unsigned long, struct stm_fs *);
	int (*get_rate)(unsigned long, const struct stm_fs *, unsigned long *);
};

static const struct clk_ops st_quadfs_pll_c32_ops;

static int clk_fs660c32_dig_get_params(unsigned long input,
		unsigned long output, struct stm_fs *fs);
static int clk_fs660c32_dig_get_rate(unsigned long, const struct stm_fs *,
		unsigned long *);

static const struct clkgen_quadfs_data st_fs660c32_C = {
	.nrst_present = true,
	.nrst = { CLKGEN_FIELD(0x2f0, 0x1, 0),
		  CLKGEN_FIELD(0x2f0, 0x1, 1),
		  CLKGEN_FIELD(0x2f0, 0x1, 2),
		  CLKGEN_FIELD(0x2f0, 0x1, 3) },
	.npda = CLKGEN_FIELD(0x2f0, 0x1, 12),
	.nsb = { CLKGEN_FIELD(0x2f0, 0x1, 8),
		 CLKGEN_FIELD(0x2f0, 0x1, 9),
		 CLKGEN_FIELD(0x2f0, 0x1, 10),
		 CLKGEN_FIELD(0x2f0, 0x1, 11) },
	.nsdiv_present = true,
	.nsdiv = { CLKGEN_FIELD(0x304, 0x1, 24),
		   CLKGEN_FIELD(0x308, 0x1, 24),
		   CLKGEN_FIELD(0x30c, 0x1, 24),
		   CLKGEN_FIELD(0x310, 0x1, 24) },
	.mdiv = { CLKGEN_FIELD(0x304, 0x1f, 15),
		  CLKGEN_FIELD(0x308, 0x1f, 15),
		  CLKGEN_FIELD(0x30c, 0x1f, 15),
		  CLKGEN_FIELD(0x310, 0x1f, 15) },
	.en = { CLKGEN_FIELD(0x2fc, 0x1, 0),
		CLKGEN_FIELD(0x2fc, 0x1, 1),
		CLKGEN_FIELD(0x2fc, 0x1, 2),
		CLKGEN_FIELD(0x2fc, 0x1, 3) },
	.ndiv = CLKGEN_FIELD(0x2f4, 0x7, 16),
	.pe = { CLKGEN_FIELD(0x304, 0x7fff, 0),
		CLKGEN_FIELD(0x308, 0x7fff, 0),
		CLKGEN_FIELD(0x30c, 0x7fff, 0),
		CLKGEN_FIELD(0x310, 0x7fff, 0) },
	.sdiv = { CLKGEN_FIELD(0x304, 0xf, 20),
		  CLKGEN_FIELD(0x308, 0xf, 20),
		  CLKGEN_FIELD(0x30c, 0xf, 20),
		  CLKGEN_FIELD(0x310, 0xf, 20) },
	.lockstatus_present = true,
	.lock_status = CLKGEN_FIELD(0x2f0, 0x1, 24),
	.powerup_polarity = 1,
	.standby_polarity = 1,
	.pll_ops = &st_quadfs_pll_c32_ops,
	.get_params = clk_fs660c32_dig_get_params,
	.get_rate = clk_fs660c32_dig_get_rate,
};

static const struct clkgen_quadfs_data st_fs660c32_D = {
	.nrst_present = true,
	.nrst = { CLKGEN_FIELD(0x2a0, 0x1, 0),
		  CLKGEN_FIELD(0x2a0, 0x1, 1),
		  CLKGEN_FIELD(0x2a0, 0x1, 2),
		  CLKGEN_FIELD(0x2a0, 0x1, 3) },
	.ndiv = CLKGEN_FIELD(0x2a4, 0x7, 16),
	.pe = { CLKGEN_FIELD(0x2b4, 0x7fff, 0),
		CLKGEN_FIELD(0x2b8, 0x7fff, 0),
		CLKGEN_FIELD(0x2bc, 0x7fff, 0),
		CLKGEN_FIELD(0x2c0, 0x7fff, 0) },
	.sdiv = { CLKGEN_FIELD(0x2b4, 0xf, 20),
		  CLKGEN_FIELD(0x2b8, 0xf, 20),
		  CLKGEN_FIELD(0x2bc, 0xf, 20),
		  CLKGEN_FIELD(0x2c0, 0xf, 20) },
	.npda = CLKGEN_FIELD(0x2a0, 0x1, 12),
	.nsb = { CLKGEN_FIELD(0x2a0, 0x1, 8),
		 CLKGEN_FIELD(0x2a0, 0x1, 9),
		 CLKGEN_FIELD(0x2a0, 0x1, 10),
		 CLKGEN_FIELD(0x2a0, 0x1, 11) },
	.nsdiv_present = true,
	.nsdiv = { CLKGEN_FIELD(0x2b4, 0x1, 24),
		   CLKGEN_FIELD(0x2b8, 0x1, 24),
		   CLKGEN_FIELD(0x2bc, 0x1, 24),
		   CLKGEN_FIELD(0x2c0, 0x1, 24) },
	.mdiv = { CLKGEN_FIELD(0x2b4, 0x1f, 15),
		  CLKGEN_FIELD(0x2b8, 0x1f, 15),
		  CLKGEN_FIELD(0x2bc, 0x1f, 15),
		  CLKGEN_FIELD(0x2c0, 0x1f, 15) },
	.en = { CLKGEN_FIELD(0x2ac, 0x1, 0),
		CLKGEN_FIELD(0x2ac, 0x1, 1),
		CLKGEN_FIELD(0x2ac, 0x1, 2),
		CLKGEN_FIELD(0x2ac, 0x1, 3) },
	.lockstatus_present = true,
	.lock_status = CLKGEN_FIELD(0x2a0, 0x1, 24),
	.powerup_polarity = 1,
	.standby_polarity = 1,
	.pll_ops = &st_quadfs_pll_c32_ops,
	.get_params = clk_fs660c32_dig_get_params,
	.get_rate = clk_fs660c32_dig_get_rate,
};
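/*
 * The FS660C32 embeds a PLL whose output feeds the four digital frequency
 * synthesizer channels. struct st_clk_quadfs_pll wraps that PLL as a clk_hw;
 * ndiv caches the NDIV value last programmed into (or read back from) the
 * hardware.
 */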
struct st_clk_quadfs_pll {
	struct clk_hw hw;
	void __iomem *regs_base;
	spinlock_t *lock;
	struct clkgen_quadfs_data *data;
	u32 ndiv;
};

#define to_quadfs_pll(_hw) container_of(_hw, struct st_clk_quadfs_pll, hw)

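/*
 * Power up the PLL: release the reset (when present), select the "good
 * reference" bandwidth filter (when present), program NDIV and write the
 * power-up state to NPDA, then wait up to 10ms for the lock status bit.
 */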
static int quadfs_pll_enable(struct clk_hw *hw)
{
	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
	unsigned long flags = 0, timeout = jiffies + msecs_to_jiffies(10);

	if (pll->lock)
		spin_lock_irqsave(pll->lock, flags);

	/* Bring the PLL out of reset, if a reset control is present. */
	if (pll->data->reset_present)
		CLKGEN_WRITE(pll, nreset, 1);

	/* Select the "good reference" input bandwidth filter, if present. */
	if (pll->data->bwfilter_present)
		CLKGEN_WRITE(pll, ref_bw, PLL_BW_GOODREF);

	CLKGEN_WRITE(pll, ndiv, pll->ndiv);

	/* Power up the PLL; the NPDA polarity depends on the variant. */
	CLKGEN_WRITE(pll, npda, !pll->data->powerup_polarity);

	if (pll->lock)
		spin_unlock_irqrestore(pll->lock, flags);

	/* Wait for the PLL to report lock, if it has a lock status bit. */
	if (pll->data->lockstatus_present)
		while (!CLKGEN_READ(pll, lock_status)) {
			if (time_after(jiffies, timeout))
				return -ETIMEDOUT;
			cpu_relax();
		}

	return 0;
}

static void quadfs_pll_disable(struct clk_hw *hw)
{
	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
	unsigned long flags = 0;

	if (pll->lock)
		spin_lock_irqsave(pll->lock, flags);

	/* Power down the PLL and, if reset control is present, hold it in reset. */
	CLKGEN_WRITE(pll, npda, pll->data->powerup_polarity);

	if (pll->data->reset_present)
		CLKGEN_WRITE(pll, nreset, 0);

	if (pll->lock)
		spin_unlock_irqrestore(pll->lock, flags);
}

static int quadfs_pll_is_enabled(struct clk_hw *hw)
{
	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
	u32 npda = CLKGEN_READ(pll, npda);

	return pll->data->powerup_polarity ? !npda : !!npda;
}

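/*
 * VCO rate of the FS660C32 PLL, as implemented below:
 * Fvco = Fin * (NDIV + 16), where NDIV is the raw register value cached in
 * stm_fs.ndiv.
 */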
static int clk_fs660c32_vco_get_rate(unsigned long input, struct stm_fs *fs,
			unsigned long *rate)
{
	unsigned long nd = fs->ndiv + 16;

	*rate = input * nd;

	return 0;
}

static unsigned long quadfs_pll_fs660c32_recalc_rate(struct clk_hw *hw,
			unsigned long parent_rate)
{
	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
	unsigned long rate = 0;
	struct stm_fs params;

	params.ndiv = CLKGEN_READ(pll, ndiv);
	if (clk_fs660c32_vco_get_rate(parent_rate, &params, &rate))
		pr_err("%s:%s error calculating rate\n",
		       clk_hw_get_name(hw), __func__);

	pll->ndiv = params.ndiv;

	return rate;
}

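/*
 * Derive NDIV for a requested VCO rate. Only outputs between 384MHz and
 * 660MHz are accepted; the input divider (pdiv) is fixed to 1, so inputs
 * above 40MHz (which would need pdiv = 2) are rejected.
 */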
static int clk_fs660c32_vco_get_params(unsigned long input,
			unsigned long output, struct stm_fs *fs)
{
	/*
	 * The effective multiplier is output / input (with pdiv fixed at 1);
	 * the NDIV register field holds that multiplier minus 16.
	 */
	unsigned long pdiv = 1, n;

	/* Output clock range: 384MHz to 660MHz */
	if (output < 384000000 || output > 660000000)
		return -EINVAL;

	if (input > 40000000)
		/* Inputs above 40MHz would need pdiv = 2, not handled here. */
		return -EINVAL;

	input /= 1000;
	output /= 1000;

	n = output * pdiv / input;
	if (n < 16)
		n = 16;
	fs->ndiv = n - 16;

	return 0;
}

static long quadfs_pll_fs660c32_round_rate(struct clk_hw *hw,
					   unsigned long rate,
					   unsigned long *prate)
{
	struct stm_fs params;

	if (clk_fs660c32_vco_get_params(*prate, rate, &params))
		return rate;

	clk_fs660c32_vco_get_rate(*prate, &params, &rate);

	pr_debug("%s: %s new rate %ld [ndiv=%u]\n",
		 __func__, clk_hw_get_name(hw),
		 rate, (unsigned int)params.ndiv);

	return rate;
}

static int quadfs_pll_fs660c32_set_rate(struct clk_hw *hw, unsigned long rate,
					unsigned long parent_rate)
{
	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
	struct stm_fs params;
	unsigned long hwrate = 0;
	unsigned long flags = 0;
	int ret;

	if (!rate || !parent_rate)
		return -EINVAL;

	ret = clk_fs660c32_vco_get_params(parent_rate, rate, &params);
	if (ret)
		return ret;

	clk_fs660c32_vco_get_rate(parent_rate, &params, &hwrate);

	pr_debug("%s: %s new rate %lu [ndiv=0x%x]\n",
		 __func__, clk_hw_get_name(hw),
		 hwrate, (unsigned int)params.ndiv);

	if (!hwrate)
		return -EINVAL;

	pll->ndiv = params.ndiv;

	if (pll->lock)
		spin_lock_irqsave(pll->lock, flags);

	CLKGEN_WRITE(pll, ndiv, pll->ndiv);

	if (pll->lock)
		spin_unlock_irqrestore(pll->lock, flags);

	return 0;
}

static const struct clk_ops st_quadfs_pll_c32_ops = {
	.enable = quadfs_pll_enable,
	.disable = quadfs_pll_disable,
	.is_enabled = quadfs_pll_is_enabled,
	.recalc_rate = quadfs_pll_fs660c32_recalc_rate,
	.round_rate = quadfs_pll_fs660c32_round_rate,
	.set_rate = quadfs_pll_fs660c32_set_rate,
};

static struct clk * __init st_clk_register_quadfs_pll(
		const char *name, const char *parent_name,
		struct clkgen_quadfs_data *quadfs, void __iomem *reg,
		spinlock_t *lock)
{
	struct st_clk_quadfs_pll *pll;
	struct clk *clk;
	struct clk_init_data init;

	/* Sanity check the required pointers. */
	if (WARN_ON(!name || !parent_name))
		return ERR_PTR(-EINVAL);

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = quadfs->pll_ops;
	init.flags = CLK_GET_RATE_NOCACHE;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	pll->data = quadfs;
	pll->regs_base = reg;
	pll->lock = lock;
	pll->hw.init = &init;

	clk = clk_register(NULL, &pll->hw);

	if (IS_ERR(clk))
		kfree(pll);

	return clk;
}

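/*
 * Each QuadFS provides four digital frequency synthesizer channels fed by
 * the PLL above. A channel's output rate is set by its MD, PE, SDIV and
 * (where present) NSDIV register fields; struct st_clk_quadfs_fsynth caches
 * the values last programmed for its channel.
 */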
struct st_clk_quadfs_fsynth {
	struct clk_hw hw;
	void __iomem *regs_base;
	spinlock_t *lock;
	struct clkgen_quadfs_data *data;

	u32 chan;

	/*
	 * Cached divider values for this channel, written to the hardware
	 * by quadfs_fsynth_program_rate().
	 */
	u32 md;
	u32 pe;
	u32 sdiv;
	u32 nsdiv;
};

#define to_quadfs_fsynth(_hw) \
	container_of(_hw, struct st_clk_quadfs_fsynth, hw)

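/*
 * Pulse the channel's EN bit so that the md/pe/sdiv values that have just
 * been written are taken into account by the synthesizer.
 */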
static void quadfs_fsynth_program_enable(struct st_clk_quadfs_fsynth *fs)
{
	CLKGEN_WRITE(fs, en[fs->chan], 1);
	CLKGEN_WRITE(fs, en[fs->chan], 0);
}

static void quadfs_fsynth_program_rate(struct st_clk_quadfs_fsynth *fs)
{
	unsigned long flags = 0;

	/*
	 * Keep the channel's EN bit low while the dividers are updated, so
	 * the hardware does not take a partially programmed configuration.
	 */
	CLKGEN_WRITE(fs, en[fs->chan], 0);

	CLKGEN_WRITE(fs, mdiv[fs->chan], fs->md);
	CLKGEN_WRITE(fs, pe[fs->chan], fs->pe);
	CLKGEN_WRITE(fs, sdiv[fs->chan], fs->sdiv);

	if (fs->lock)
		spin_lock_irqsave(fs->lock, flags);

	if (fs->data->nsdiv_present)
		CLKGEN_WRITE(fs, nsdiv[fs->chan], fs->nsdiv);

	if (fs->lock)
		spin_unlock_irqrestore(fs->lock, flags);
}

static int quadfs_fsynth_enable(struct clk_hw *hw)
{
	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
	unsigned long flags = 0;

	pr_debug("%s: %s\n", __func__, clk_hw_get_name(hw));

	quadfs_fsynth_program_rate(fs);

	if (fs->lock)
		spin_lock_irqsave(fs->lock, flags);

	CLKGEN_WRITE(fs, nsb[fs->chan], !fs->data->standby_polarity);

	if (fs->data->nrst_present)
		CLKGEN_WRITE(fs, nrst[fs->chan], 0);

	if (fs->lock)
		spin_unlock_irqrestore(fs->lock, flags);

	quadfs_fsynth_program_enable(fs);

	return 0;
}

static void quadfs_fsynth_disable(struct clk_hw *hw)
{
	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
	unsigned long flags = 0;

	pr_debug("%s: %s\n", __func__, clk_hw_get_name(hw));

	if (fs->lock)
		spin_lock_irqsave(fs->lock, flags);

	CLKGEN_WRITE(fs, nsb[fs->chan], fs->data->standby_polarity);

	if (fs->lock)
		spin_unlock_irqrestore(fs->lock, flags);
}

static int quadfs_fsynth_is_enabled(struct clk_hw *hw)
{
	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
	u32 nsb = CLKGEN_READ(fs, nsb[fs->chan]);

	pr_debug("%s: %s enable bit = 0x%x\n",
		 __func__, clk_hw_get_name(hw), nsb);

	return fs->data->standby_polarity ? !nsb : !!nsb;
}

#define P20 (uint64_t)(1 << 20)

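/*
 * Output rate of a digital synthesizer channel, as implemented below:
 *
 *   Fout = (32 * Fin) / (ns * 2^sdiv * (32 + md + pe / 2^15))
 *
 * where ns is 1 when the NSDIV register bit is 1 and 3 when it is 0.
 */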
static int clk_fs660c32_dig_get_rate(unsigned long input,
				const struct stm_fs *fs, unsigned long *rate)
{
	unsigned long s = (1 << fs->sdiv);
	unsigned long ns;
	uint64_t res;

	/*
	 * The NSDIV register bit selects a divide-by-1 (bit = 1) or
	 * divide-by-3 (bit = 0) output stage.
	 */
	ns = (fs->nsdiv == 1) ? 1 : 3;

	res = (P20 * (32 + fs->mdiv) + 32 * fs->pe) * s * ns;
	*rate = (unsigned long)div64_u64(input * P20 * 32, res);

	return 0;
}

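/*
 * For a given md (m) and sdiv (si), compute the pe value that best matches
 * the requested output and, if the resulting deviation improves on the best
 * one found so far, record the candidate parameters in *fs. Returns 1 when
 * the computed pe does not fit in the 15-bit PE field.
 */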
static int clk_fs660c32_get_pe(int m, int si, unsigned long *deviation,
		signed long input, unsigned long output, uint64_t *p,
		struct stm_fs *fs)
{
	unsigned long new_freq, new_deviation;
	struct stm_fs fs_tmp;
	uint64_t val;

	val = (uint64_t)output << si;

	*p = (uint64_t)input * P20 - (32LL + (uint64_t)m) * val * (P20 / 32LL);

	*p = div64_u64(*p, val);

	if (*p > 32767LL)
		return 1;

	fs_tmp.mdiv = (unsigned long) m;
	fs_tmp.pe = (unsigned long)*p;
	fs_tmp.sdiv = si;
	fs_tmp.nsdiv = 1;

	clk_fs660c32_dig_get_rate(input, &fs_tmp, &new_freq);

	new_deviation = abs(output - new_freq);

	if (new_deviation < *deviation) {
		fs->mdiv = m;
		fs->pe = (unsigned long)*p;
		fs->sdiv = si;
		fs->nsdiv = 1;
		*deviation = new_deviation;
	}
	return 0;
}

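/*
 * Search for the md/pe/sdiv combination that gets closest to the requested
 * output: scan sdiv 0..8, probe the md boundaries (0 and 31) first, then try
 * every md in between, and finally fine-tune pe by +/-2 around the best
 * match. Returns -1 when no usable combination exists.
 */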
static int clk_fs660c32_dig_get_params(unsigned long input,
		unsigned long output, struct stm_fs *fs)
{
	int si;
	int m;
	unsigned long new_freq, new_deviation;
	/* Start with an "infinite" deviation: no solution found yet. */
	unsigned long deviation = ~0;
	uint64_t p, p1, p2;
	int r1, r2;

	struct stm_fs fs_tmp;

	for (si = 0; (si <= 8) && deviation; si++) {

		/* Boundary test to avoid useless iteration */
		r1 = clk_fs660c32_get_pe(0, si, &deviation,
				input, output, &p1, fs);
		r2 = clk_fs660c32_get_pe(31, si, &deviation,
				input, output, &p2, fs);

		if (r1 && r2 && (p1 > p2))
			continue;

		/* Try to find the best deviation */
		for (m = 1; (m < 31) && deviation; m++)
			clk_fs660c32_get_pe(m, si, &deviation,
					input, output, &p, fs);

	}

	if (deviation == ~0)
		return -1; /* No solution found */

	/* pe fine tuning if the deviation is not 0: +/- 2 around the best pe */
	if (deviation) {
		fs_tmp.mdiv = fs->mdiv;
		fs_tmp.sdiv = fs->sdiv;
		fs_tmp.nsdiv = fs->nsdiv;

		if (fs->pe > 2)
			p2 = fs->pe - 2;
		else
			p2 = 0;

		for (; p2 < 32768ll && (p2 <= (fs->pe + 2)); p2++) {
			fs_tmp.pe = (unsigned long)p2;

			clk_fs660c32_dig_get_rate(input, &fs_tmp, &new_freq);

			new_deviation = abs(output - new_freq);

			if (new_deviation < deviation) {
				fs->pe = (unsigned long)p2;
				deviation = new_deviation;
			}
		}
	}
	return 0;
}

static int quadfs_fsynt_get_hw_value_for_recalc(struct st_clk_quadfs_fsynth *fs,
		struct stm_fs *params)
{
	/* Read the hardware dividers for this channel. */
	params->mdiv = CLKGEN_READ(fs, mdiv[fs->chan]);
	params->pe = CLKGEN_READ(fs, pe[fs->chan]);
	params->sdiv = CLKGEN_READ(fs, sdiv[fs->chan]);

	if (fs->data->nsdiv_present)
		params->nsdiv = CLKGEN_READ(fs, nsdiv[fs->chan]);
	else
		params->nsdiv = 1;

	/*
	 * All-zero dividers mean the channel has never been programmed;
	 * report that so recalc_rate() can return 0.
	 */
	if (!params->mdiv && !params->pe && !params->sdiv)
		return 1;

	fs->md = params->mdiv;
	fs->pe = params->pe;
	fs->sdiv = params->sdiv;
	fs->nsdiv = params->nsdiv;

	return 0;
}

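/*
 * Compute the closest achievable rate to 'drate' for the given parent rate
 * and fill in *params with the matching divider values; returns 0 when no
 * parameters could be found.
 */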
static long quadfs_find_best_rate(struct clk_hw *hw, unsigned long drate,
				  unsigned long prate, struct stm_fs *params)
{
	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
	int (*clk_fs_get_rate)(unsigned long,
				const struct stm_fs *, unsigned long *);
	int (*clk_fs_get_params)(unsigned long, unsigned long, struct stm_fs *);
	unsigned long rate = 0;

	clk_fs_get_rate = fs->data->get_rate;
	clk_fs_get_params = fs->data->get_params;

	if (!clk_fs_get_params(prate, drate, params))
		clk_fs_get_rate(prate, params, &rate);

	return rate;
}

static unsigned long quadfs_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
	unsigned long rate = 0;
	struct stm_fs params;
	int (*clk_fs_get_rate)(unsigned long,
				const struct stm_fs *, unsigned long *);

	clk_fs_get_rate = fs->data->get_rate;

	if (quadfs_fsynt_get_hw_value_for_recalc(fs, &params))
		return 0;

	if (clk_fs_get_rate(parent_rate, &params, &rate)) {
		pr_err("%s:%s error calculating rate\n",
		       clk_hw_get_name(hw), __func__);
	}

	pr_debug("%s:%s rate %lu\n", clk_hw_get_name(hw), __func__, rate);

	return rate;
}

static long quadfs_round_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long *prate)
{
	struct stm_fs params;

	rate = quadfs_find_best_rate(hw, rate, *prate, &params);

	pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n",
		 __func__, clk_hw_get_name(hw),
		 rate, (unsigned int)params.sdiv, (unsigned int)params.mdiv,
		 (unsigned int)params.pe, (unsigned int)params.nsdiv);

	return rate;
}

static void quadfs_program_and_enable(struct st_clk_quadfs_fsynth *fs,
		struct stm_fs *params)
{
	fs->md = params->mdiv;
	fs->pe = params->pe;
	fs->sdiv = params->sdiv;
	fs->nsdiv = params->nsdiv;

	quadfs_fsynth_program_rate(fs);
	quadfs_fsynth_program_enable(fs);
}

static int quadfs_set_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long parent_rate)
{
	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
	struct stm_fs params;
	long hwrate;

	if (!rate || !parent_rate)
		return -EINVAL;

	memset(&params, 0, sizeof(struct stm_fs));

	hwrate = quadfs_find_best_rate(hw, rate, parent_rate, &params);
	if (!hwrate)
		return -EINVAL;

	quadfs_program_and_enable(fs, &params);

	return 0;
}

static const struct clk_ops st_quadfs_ops = {
	.enable = quadfs_fsynth_enable,
	.disable = quadfs_fsynth_disable,
	.is_enabled = quadfs_fsynth_is_enabled,
	.round_rate = quadfs_round_rate,
	.set_rate = quadfs_set_rate,
	.recalc_rate = quadfs_recalc_rate,
};

static struct clk * __init st_clk_register_quadfs_fsynth(
		const char *name, const char *parent_name,
		struct clkgen_quadfs_data *quadfs, void __iomem *reg, u32 chan,
		unsigned long flags, spinlock_t *lock)
{
	struct st_clk_quadfs_fsynth *fs;
	struct clk *clk;
	struct clk_init_data init;

	if (WARN_ON(!name || !parent_name))
		return ERR_PTR(-EINVAL);

	fs = kzalloc(sizeof(*fs), GFP_KERNEL);
	if (!fs)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &st_quadfs_ops;
	init.flags = flags | CLK_GET_RATE_NOCACHE;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	fs->data = quadfs;
	fs->regs_base = reg;
	fs->chan = chan;
	fs->lock = lock;
	fs->hw.init = &init;

	clk = clk_register(NULL, &fs->hw);

	if (IS_ERR(clk))
		kfree(fs);

	return clk;
}

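/*
 * Create one clock per fsynth channel named in "clock-output-names" (an
 * empty name marks an unused channel) and expose them through a onecell
 * clock provider.
 */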
static void __init st_of_create_quadfs_fsynths(
		struct device_node *np, const char *pll_name,
		struct clkgen_quadfs_data *quadfs, void __iomem *reg,
		spinlock_t *lock)
{
	struct clk_onecell_data *clk_data;
	int fschan;

	clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
	if (!clk_data)
		return;

	clk_data->clk_num = QUADFS_MAX_CHAN;
	clk_data->clks = kcalloc(QUADFS_MAX_CHAN, sizeof(struct clk *),
				 GFP_KERNEL);

	if (!clk_data->clks) {
		kfree(clk_data);
		return;
	}

	for (fschan = 0; fschan < QUADFS_MAX_CHAN; fschan++) {
		struct clk *clk;
		const char *clk_name;
		unsigned long flags = 0;

		if (of_property_read_string_index(np, "clock-output-names",
						  fschan, &clk_name)) {
			break;
		}

		/*
		 * An empty clock-output-names entry means this channel is
		 * unused, so skip it.
		 */
		if (*clk_name == '\0')
			continue;

		of_clk_detect_critical(np, fschan, &flags);

		clk = st_clk_register_quadfs_fsynth(clk_name, pll_name,
						    quadfs, reg, fschan,
						    flags, lock);

		/*
		 * Only record the clock if registration succeeded; a failed
		 * channel is simply left out of the provider.
		 */
		if (!IS_ERR(clk)) {
			clk_data->clks[fschan] = clk;
			pr_debug("%s: parent %s rate %u\n",
				 __clk_get_name(clk),
				 __clk_get_name(clk_get_parent(clk)),
				 (unsigned int)clk_get_rate(clk));
		}
	}

	of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
}

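/*
 * Common setup: map the clockgen registers, register the PLL (named
 * "<node>.pll") with the node's first clock as parent, then create the
 * fsynth channel clocks with that PLL as their parent.
 */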
static void __init st_of_quadfs_setup(struct device_node *np,
		struct clkgen_quadfs_data *data)
{
	struct clk *clk;
	const char *pll_name, *clk_parent_name;
	void __iomem *reg;
	spinlock_t *lock;

	reg = of_iomap(np, 0);
	if (!reg)
		return;

	clk_parent_name = of_clk_get_parent_name(np, 0);
	if (!clk_parent_name)
		return;

	pll_name = kasprintf(GFP_KERNEL, "%pOFn.pll", np);
	if (!pll_name)
		return;

	lock = kzalloc(sizeof(*lock), GFP_KERNEL);
	if (!lock)
		goto err_exit;

	spin_lock_init(lock);

	clk = st_clk_register_quadfs_pll(pll_name, clk_parent_name, data,
					 reg, lock);
	if (IS_ERR(clk))
		goto err_exit;
	else
		pr_debug("%s: parent %s rate %u\n",
			 __clk_get_name(clk),
			 __clk_get_name(clk_get_parent(clk)),
			 (unsigned int)clk_get_rate(clk));

	st_of_create_quadfs_fsynths(np, pll_name, data, reg, lock);

err_exit:
	kfree(pll_name);
}

static void __init st_of_quadfs660C_setup(struct device_node *np)
{
	st_of_quadfs_setup(np, (struct clkgen_quadfs_data *) &st_fs660c32_C);
}
CLK_OF_DECLARE(quadfs660C, "st,quadfs-pll", st_of_quadfs660C_setup);

static void __init st_of_quadfs660D_setup(struct device_node *np)
{
	st_of_quadfs_setup(np, (struct clkgen_quadfs_data *) &st_fs660c32_D);
}
CLK_OF_DECLARE(quadfs660D, "st,quadfs", st_of_quadfs660D_setup);