1
2
3
4
5
6
7#include <linux/clkdev.h>
8#include <linux/delay.h>
9#include <linux/io.h>
10#include <linux/of_device.h>
11#include "sifive-prci.h"
12#include "fu540-prci.h"
13#include "fu740-prci.h"
14
/* Clock list for the FU540 SoC; matched via the of_device_id table below. */
static const struct prci_clk_desc prci_clk_fu540 = {
	.clks = __prci_init_clocks_fu540,
	.num_clks = ARRAY_SIZE(__prci_init_clocks_fu540),
};
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
/* Read a 32-bit PRCI register at byte offset @offs from the mapped base. */
static u32 __prci_readl(struct __prci_data *pd, u32 offs)
{
	return readl_relaxed(pd->va + offs);
}
41
/*
 * Write @v to the 32-bit PRCI register at byte offset @offs.
 * NOTE(review): argument order (value, offset, data) is reversed relative to
 * __prci_readl(data, offset) — kept as-is since all callers use this order.
 */
static void __prci_writel(u32 v, u32 offs, struct __prci_data *pd)
{
	writel_relaxed(v, pd->va + offs);
}
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63static void __prci_wrpll_unpack(struct wrpll_cfg *c, u32 r)
64{
65 u32 v;
66
67 v = r & PRCI_COREPLLCFG0_DIVR_MASK;
68 v >>= PRCI_COREPLLCFG0_DIVR_SHIFT;
69 c->divr = v;
70
71 v = r & PRCI_COREPLLCFG0_DIVF_MASK;
72 v >>= PRCI_COREPLLCFG0_DIVF_SHIFT;
73 c->divf = v;
74
75 v = r & PRCI_COREPLLCFG0_DIVQ_MASK;
76 v >>= PRCI_COREPLLCFG0_DIVQ_SHIFT;
77 c->divq = v;
78
79 v = r & PRCI_COREPLLCFG0_RANGE_MASK;
80 v >>= PRCI_COREPLLCFG0_RANGE_SHIFT;
81 c->range = v;
82
83 c->flags &=
84 (WRPLL_FLAGS_INT_FEEDBACK_MASK | WRPLL_FLAGS_EXT_FEEDBACK_MASK);
85
86
87 c->flags |= WRPLL_FLAGS_INT_FEEDBACK_MASK;
88}
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105static u32 __prci_wrpll_pack(const struct wrpll_cfg *c)
106{
107 u32 r = 0;
108
109 r |= c->divr << PRCI_COREPLLCFG0_DIVR_SHIFT;
110 r |= c->divf << PRCI_COREPLLCFG0_DIVF_SHIFT;
111 r |= c->divq << PRCI_COREPLLCFG0_DIVQ_SHIFT;
112 r |= c->range << PRCI_COREPLLCFG0_RANGE_SHIFT;
113
114
115 r |= PRCI_COREPLLCFG0_FSE_MASK;
116
117 return r;
118}
119
120
121
122
123
124
125
126
127
128
129
130
131
/*
 * __prci_wrpll_read_cfg0() - refresh the cached PLL config from hardware
 * @pd: PRCI context
 * @pwd: PLL whose CFG0 register is read and unpacked into pwd->c
 */
static void __prci_wrpll_read_cfg0(struct __prci_data *pd,
				   struct __prci_wrpll_data *pwd)
{
	__prci_wrpll_unpack(&pwd->c, __prci_readl(pd, pwd->cfg0_offs));
}
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152static void __prci_wrpll_write_cfg0(struct __prci_data *pd,
153 struct __prci_wrpll_data *pwd,
154 struct wrpll_cfg *c)
155{
156 __prci_writel(__prci_wrpll_pack(c), pwd->cfg0_offs, pd);
157
158 memcpy(&pwd->c, c, sizeof(*c));
159}
160
161
162
163
164
165
166
167
/*
 * __prci_wrpll_write_cfg1() - write a PLL's CFG1 (clock-enable) register
 * @pd: PRCI context
 * @pwd: PLL to program
 * @enable: raw value to write (callers pass the CKE mask or a masked value)
 */
static void __prci_wrpll_write_cfg1(struct __prci_data *pd,
				    struct __prci_wrpll_data *pwd,
				    u32 enable)
{
	__prci_writel(enable, pwd->cfg1_offs, pd);
}
174
175
176
177
178
179
180
181
/*
 * sifive_prci_wrpll_recalc_rate() - clk_ops .recalc_rate for WRPLL clocks
 *
 * Computes the output rate from the cached PLL configuration (pwd->c) —
 * it does not re-read the hardware register.
 */
unsigned long sifive_prci_wrpll_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_wrpll_data *pwd = pc->pwd;

	return wrpll_calc_output_rate(&pwd->c, parent_rate);
}
190
191long sifive_prci_wrpll_round_rate(struct clk_hw *hw,
192 unsigned long rate,
193 unsigned long *parent_rate)
194{
195 struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
196 struct __prci_wrpll_data *pwd = pc->pwd;
197 struct wrpll_cfg c;
198
199 memcpy(&c, &pwd->c, sizeof(c));
200
201 wrpll_configure_for_rate(&c, rate, *parent_rate);
202
203 return wrpll_calc_output_rate(&c, *parent_rate);
204}
205
/*
 * sifive_prci_wrpll_set_rate() - clk_ops .set_rate for WRPLL clocks
 *
 * Updates the cached configuration for @rate, switches downstream logic to
 * the bypass source (if the PLL has one), programs CFG0, then busy-waits
 * for the PLL's worst-case lock time before returning.
 *
 * Returns 0 on success, or the error from wrpll_configure_for_rate().
 */
int sifive_prci_wrpll_set_rate(struct clk_hw *hw,
			       unsigned long rate, unsigned long parent_rate)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct __prci_data *pd = pc->pd;
	int r;

	/* NOTE: on failure pwd->c may already have been modified */
	r = wrpll_configure_for_rate(&pwd->c, rate, parent_rate);
	if (r)
		return r;

	/* Keep consumers running off the bypass source while reprogramming */
	if (pwd->enable_bypass)
		pwd->enable_bypass(pd);

	__prci_wrpll_write_cfg0(pd, pwd, &pwd->c);

	/* Wait for the PLL to lock before callers switch back to it */
	udelay(wrpll_calc_max_lock_us(&pwd->c));

	return 0;
}
227
228int sifive_clk_is_enabled(struct clk_hw *hw)
229{
230 struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
231 struct __prci_wrpll_data *pwd = pc->pwd;
232 struct __prci_data *pd = pc->pd;
233 u32 r;
234
235 r = __prci_readl(pd, pwd->cfg1_offs);
236
237 if (r & PRCI_COREPLLCFG1_CKE_MASK)
238 return 1;
239 else
240 return 0;
241}
242
/*
 * sifive_prci_clock_enable() - clk_ops .enable for WRPLL clocks
 *
 * Sets the CKE bit (unless already set), then routes consumers off the
 * bypass source and onto the PLL output, if this PLL has a bypass hook.
 * Always returns 0.
 */
int sifive_prci_clock_enable(struct clk_hw *hw)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct __prci_data *pd = pc->pd;

	if (sifive_clk_is_enabled(hw))
		return 0;

	__prci_wrpll_write_cfg1(pd, pwd, PRCI_COREPLLCFG1_CKE_MASK);

	if (pwd->disable_bypass)
		pwd->disable_bypass(pd);

	return 0;
}
259
/*
 * sifive_prci_clock_disable() - clk_ops .disable for WRPLL clocks
 *
 * Moves consumers to the bypass source first (if available), then clears
 * the CKE bit in CFG1 to gate the PLL output.
 */
void sifive_prci_clock_disable(struct clk_hw *hw)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_wrpll_data *pwd = pc->pwd;
	struct __prci_data *pd = pc->pd;
	u32 r;

	if (pwd->enable_bypass)
		pwd->enable_bypass(pd);

	r = __prci_readl(pd, pwd->cfg1_offs);
	r &= ~PRCI_COREPLLCFG1_CKE_MASK;

	__prci_wrpll_write_cfg1(pd, pwd, r);
}
275
276
277
/*
 * sifive_prci_tlclksel_recalc_rate() - clk_ops .recalc_rate for tlclk
 *
 * The TLCLKSEL status bit selects divide-by-1 (bit set) or divide-by-2
 * (bit clear) of the parent rate.
 */
unsigned long sifive_prci_tlclksel_recalc_rate(struct clk_hw *hw,
					       unsigned long parent_rate)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_data *pd = pc->pd;
	u32 v;
	u8 div;

	v = __prci_readl(pd, PRCI_CLKMUXSTATUSREG_OFFSET);
	v &= PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK;
	div = v ? 1 : 2;

	return div_u64(parent_rate, div);
}
292
293
294
/*
 * sifive_prci_hfpclkplldiv_recalc_rate() - clk_ops .recalc_rate for the
 * HFPCLK PLL divider
 *
 * The register holds (divisor - 2); the actual divisor is the register
 * value plus 2.
 */
unsigned long sifive_prci_hfpclkplldiv_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_data *pd = pc->pd;
	u32 div = __prci_readl(pd, PRCI_HFPCLKPLLDIV_OFFSET);

	return div_u64(parent_rate, div + 2);
}
304
305
306
307
308
309
310
311
312
313
314
315
316
317
/*
 * sifive_prci_coreclksel_use_hfclk() - switch the core clock mux to HFCLK
 *
 * Sets the CORECLKSEL bit.  The trailing read-back is intentional —
 * presumably to ensure the posted write has reached the device before
 * returning; confirm against the PRCI documentation.
 */
void sifive_prci_coreclksel_use_hfclk(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
	r |= PRCI_CORECLKSEL_CORECLKSEL_MASK;
	__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);	/* barrier read-back */
}
328
329
330
331
332
333
334
335
336
337
338
/*
 * sifive_prci_coreclksel_use_corepll() - switch the core clock mux to
 * the CORE PLL output
 *
 * Clears the CORECLKSEL bit; the trailing read-back flushes the write.
 */
void sifive_prci_coreclksel_use_corepll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
	r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
	__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);	/* barrier read-back */
}
349
350
351
352
353
354
355
356
357
358
359
360
/*
 * sifive_prci_coreclksel_use_final_corepll() - select the final CORE PLL
 * mux output as the core clock source
 *
 * Same register operation as sifive_prci_coreclksel_use_corepll(); kept
 * separate so each SoC's bypass hooks read unambiguously.
 */
void sifive_prci_coreclksel_use_final_corepll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
	r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
	__prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);	/* barrier read-back */
}
371
372
373
374
375
376
377
378
379
380
381
/*
 * sifive_prci_corepllsel_use_dvfscorepll() - select the DVFS CORE PLL as
 * the input to the core clock mux (sets COREPLLSEL)
 */
void sifive_prci_corepllsel_use_dvfscorepll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);
	r |= PRCI_COREPLLSEL_COREPLLSEL_MASK;
	__prci_writel(r, PRCI_COREPLLSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);	/* barrier read-back */
}
392
393
394
395
396
397
398
399
400
401
402
/*
 * sifive_prci_corepllsel_use_corepll() - select the CORE PLL as the input
 * to the core clock mux (clears COREPLLSEL)
 */
void sifive_prci_corepllsel_use_corepll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);
	r &= ~PRCI_COREPLLSEL_COREPLLSEL_MASK;
	__prci_writel(r, PRCI_COREPLLSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);	/* barrier read-back */
}
413
414
415
416
417
418
419
420
421
422
423
/*
 * sifive_prci_hfpclkpllsel_use_hfclk() - bypass the HFPCLK PLL, driving
 * the periphery clock directly from HFCLK (sets HFPCLKPLLSEL)
 */
void sifive_prci_hfpclkpllsel_use_hfclk(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);
	r |= PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_MASK;
	__prci_writel(r, PRCI_HFPCLKPLLSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);	/* barrier read-back */
}
434
435
436
437
438
439
440
441
442
443
444
/*
 * sifive_prci_hfpclkpllsel_use_hfpclkpll() - drive the periphery clock
 * from the HFPCLK PLL output (clears HFPCLKPLLSEL)
 */
void sifive_prci_hfpclkpllsel_use_hfpclkpll(struct __prci_data *pd)
{
	u32 r;

	r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);
	r &= ~PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_MASK;
	__prci_writel(r, PRCI_HFPCLKPLLSEL_OFFSET, pd);

	r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);	/* barrier read-back */
}
455
456
457int sifive_prci_pcie_aux_clock_is_enabled(struct clk_hw *hw)
458{
459 struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
460 struct __prci_data *pd = pc->pd;
461 u32 r;
462
463 r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET);
464
465 if (r & PRCI_PCIE_AUX_EN_MASK)
466 return 1;
467 else
468 return 0;
469}
470
/*
 * sifive_prci_pcie_aux_clock_enable() - clk_ops .enable for the PCIe
 * auxiliary clock
 *
 * Writes 1 to the PCIE_AUX register if not already enabled; the read-back
 * flushes the posted write.  Always returns 0.
 */
int sifive_prci_pcie_aux_clock_enable(struct clk_hw *hw)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_data *pd = pc->pd;
	u32 r __maybe_unused;

	if (sifive_prci_pcie_aux_clock_is_enabled(hw))
		return 0;

	__prci_writel(1, PRCI_PCIE_AUX_OFFSET, pd);
	r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET);	/* barrier read-back */

	return 0;
}
485
/*
 * sifive_prci_pcie_aux_clock_disable() - clk_ops .disable for the PCIe
 * auxiliary clock
 *
 * Writes 0 to the PCIE_AUX register; the read-back flushes the write.
 */
void sifive_prci_pcie_aux_clock_disable(struct clk_hw *hw)
{
	struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
	struct __prci_data *pd = pc->pd;
	u32 r __maybe_unused;

	__prci_writel(0, PRCI_PCIE_AUX_OFFSET, pd);
	r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET);	/* barrier read-back */

}
496
497
498
499
500
501
502
503
504
505
506
507
/*
 * __prci_register_clocks() - register all clocks in @desc with the clk core
 * @dev: PRCI platform device
 * @pd: per-instance PRCI data (hw_clks array is filled in here)
 * @desc: SoC-specific clock list selected by the of_device_id match
 *
 * For each clock: seeds the cached PLL config from hardware (if it has a
 * PLL), registers the clk_hw, registers a clkdev lookup, and records the
 * hw pointer for the onecell provider.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int __prci_register_clocks(struct device *dev, struct __prci_data *pd,
				  const struct prci_clk_desc *desc)
{
	struct clk_init_data init = { };
	struct __prci_clock *pic;
	int parent_count, i, r;

	parent_count = of_clk_get_parent_count(dev->of_node);
	if (parent_count != EXPECTED_CLK_PARENT_COUNT) {
		dev_err(dev, "expected only two parent clocks, found %d\n",
			parent_count);
		return -EINVAL;
	}

	/* Register PLLs */
	for (i = 0; i < desc->num_clks; ++i) {
		pic = &(desc->clks[i]);

		/* init is reused each iteration; all fields are reassigned */
		init.name = pic->name;
		init.parent_names = &pic->parent_name;
		init.num_parents = 1;
		init.ops = pic->ops;
		pic->hw.init = &init;

		pic->pd = pd;

		/* Seed the cached WRPLL config from the hardware state */
		if (pic->pwd)
			__prci_wrpll_read_cfg0(pd, pic->pwd);

		r = devm_clk_hw_register(dev, &pic->hw);
		if (r) {
			dev_warn(dev, "Failed to register clock %s: %d\n",
				 init.name, r);
			return r;
		}

		r = clk_hw_register_clkdev(&pic->hw, pic->name, dev_name(dev));
		if (r) {
			dev_warn(dev, "Failed to register clkdev for %s: %d\n",
				 init.name, r);
			return r;
		}

		pd->hw_clks.hws[i] = &pic->hw;
	}

	pd->hw_clks.num = i;

	r = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
					&pd->hw_clks);
	if (r) {
		dev_err(dev, "could not add hw_provider: %d\n", r);
		return r;
	}

	return 0;
}
565
566
567
568
569
570
571
572static int sifive_prci_probe(struct platform_device *pdev)
573{
574 struct device *dev = &pdev->dev;
575 struct resource *res;
576 struct __prci_data *pd;
577 const struct prci_clk_desc *desc;
578 int r;
579
580 desc = of_device_get_match_data(&pdev->dev);
581
582 pd = devm_kzalloc(dev, struct_size(pd, hw_clks.hws, desc->num_clks), GFP_KERNEL);
583 if (!pd)
584 return -ENOMEM;
585
586 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
587 pd->va = devm_ioremap_resource(dev, res);
588 if (IS_ERR(pd->va))
589 return PTR_ERR(pd->va);
590
591 pd->reset.rcdev.owner = THIS_MODULE;
592 pd->reset.rcdev.nr_resets = PRCI_RST_NR;
593 pd->reset.rcdev.ops = &reset_simple_ops;
594 pd->reset.rcdev.of_node = pdev->dev.of_node;
595 pd->reset.active_low = true;
596 pd->reset.membase = pd->va + PRCI_DEVICESRESETREG_OFFSET;
597 spin_lock_init(&pd->reset.lock);
598
599 r = devm_reset_controller_register(&pdev->dev, &pd->reset.rcdev);
600 if (r) {
601 dev_err(dev, "could not register reset controller: %d\n", r);
602 return r;
603 }
604 r = __prci_register_clocks(dev, pd, desc);
605 if (r) {
606 dev_err(dev, "could not register clocks: %d\n", r);
607 return r;
608 }
609
610 dev_dbg(dev, "SiFive PRCI probed\n");
611
612 return 0;
613}
614
/* Compatible strings mapped to their SoC-specific clock descriptors. */
static const struct of_device_id sifive_prci_of_match[] = {
	{.compatible = "sifive,fu540-c000-prci", .data = &prci_clk_fu540},
	{.compatible = "sifive,fu740-c000-prci", .data = &prci_clk_fu740},
	{}
};
620
/* Built-in platform driver; no remove hook — all resources are devm-managed. */
static struct platform_driver sifive_prci_driver = {
	.driver	= {
		.name = "sifive-clk-prci",
		.of_match_table = sifive_prci_of_match,
	},
	.probe = sifive_prci_probe,
};
628
/*
 * Registered at core_initcall so PRCI clocks exist before consumers
 * (e.g. serial, ethernet) probe at later initcall levels.
 */
static int __init sifive_prci_init(void)
{
	return platform_driver_register(&sifive_prci_driver);
}
core_initcall(sifive_prci_init);
634