/*
 * Clock initialization for OMAP4/OMAP5 class SoCs: system clock detection,
 * DPLL setup, core voltage scaling and clock domain/module enabling.
 */
#include <common.h>
#include <i2c.h>
#include <asm/omap_common.h>
#include <asm/gpio.h>
#include <asm/arch/clock.h>
#include <asm/arch/sys_proto.h>
#include <asm/utils.h>
#include <asm/omap_gpio.h>
#include <asm/emif.h>

#ifndef CONFIG_SPL_BUILD
/*
 * Printing to the console does not work unless this code is executed
 * from SPL, so stub out printf/puts for other builds.
 */
#define printf(fmt, args...)
#define puts(s)
#endif

const u32 sys_clk_array[8] = {
	12000000,
	20000000,
	16800000,
	19200000,
	26000000,
	27000000,
	38400000,
};

static inline u32 __get_sys_clk_index(void)
{
	s8 ind;

	/*
	 * For ES1.0 the ROM code calibration of the sys clock is not
	 * reliable due to a hw issue, so use a hard-coded value. If this
	 * value is not correct for a given board, override this function
	 * in the board file. From ES2.0 onwards this information is
	 * available in CM_SYS_CLKSEL.
	 */
	if (omap_revision() == OMAP4430_ES1_0)
		ind = OMAP_SYS_CLK_IND_38_4_MHZ;
	else {
		/* SYS_CLKSEL - 1 to match the dpll param array indices */
		ind = (readl((*prcm)->cm_sys_clksel) &
			CM_SYS_CLKSEL_SYS_CLKSEL_MASK) - 1;
	}
	return ind;
}

u32 get_sys_clk_index(void)
	__attribute__ ((weak, alias("__get_sys_clk_index")));

u32 get_sys_clk_freq(void)
{
	u8 index = get_sys_clk_index();
	return sys_clk_array[index];
}

void setup_post_dividers(u32 const base, const struct dpll_params *params)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	/* Setup post-dividers; a negative value means the divider is unused */
	if (params->m2 >= 0)
		writel(params->m2, &dpll_regs->cm_div_m2_dpll);
	if (params->m3 >= 0)
		writel(params->m3, &dpll_regs->cm_div_m3_dpll);
	if (params->m4_h11 >= 0)
		writel(params->m4_h11, &dpll_regs->cm_div_m4_h11_dpll);
	if (params->m5_h12 >= 0)
		writel(params->m5_h12, &dpll_regs->cm_div_m5_h12_dpll);
	if (params->m6_h13 >= 0)
		writel(params->m6_h13, &dpll_regs->cm_div_m6_h13_dpll);
	if (params->m7_h14 >= 0)
		writel(params->m7_h14, &dpll_regs->cm_div_m7_h14_dpll);
	if (params->h21 >= 0)
		writel(params->h21, &dpll_regs->cm_div_h21_dpll);
	if (params->h22 >= 0)
		writel(params->h22, &dpll_regs->cm_div_h22_dpll);
	if (params->h23 >= 0)
		writel(params->h23, &dpll_regs->cm_div_h23_dpll);
	if (params->h24 >= 0)
		writel(params->h24, &dpll_regs->cm_div_h24_dpll);
}

static inline void do_bypass_dpll(u32 const base)
{
	struct dpll_regs *dpll_regs = (struct dpll_regs *)base;

	clrsetbits_le32(&dpll_regs->cm_clkmode_dpll,
			CM_CLKMODE_DPLL_DPLL_EN_MASK,
			DPLL_EN_FAST_RELOCK_BYPASS <<
			CM_CLKMODE_DPLL_EN_SHIFT);
}

static inline void wait_for_bypass(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	if (!wait_on_value(ST_DPLL_CLK_MASK, 0, &dpll_regs->cm_idlest_dpll,
			   LDELAY)) {
		printf("Bypassing DPLL failed %x\n", base);
	}
}

static inline void do_lock_dpll(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	clrsetbits_le32(&dpll_regs->cm_clkmode_dpll,
			CM_CLKMODE_DPLL_DPLL_EN_MASK,
			DPLL_EN_LOCK << CM_CLKMODE_DPLL_EN_SHIFT);
}

static inline void wait_for_lock(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	if (!wait_on_value(ST_DPLL_CLK_MASK, ST_DPLL_CLK_MASK,
			   &dpll_regs->cm_idlest_dpll, LDELAY)) {
		printf("DPLL locking failed for %x\n", base);
		hang();
	}
}

inline u32 check_for_lock(u32 const base)
{
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;
	u32 lock = readl(&dpll_regs->cm_idlest_dpll) & ST_DPLL_CLK_MASK;

	return lock;
}

const struct dpll_params *get_mpu_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->mpu[sysclk_ind];
}

const struct dpll_params *get_core_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->core[sysclk_ind];
}

const struct dpll_params *get_per_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->per[sysclk_ind];
}

const struct dpll_params *get_iva_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->iva[sysclk_ind];
}

const struct dpll_params *get_usb_dpll_params(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->usb[sysclk_ind];
}

const struct dpll_params *get_abe_dpll_params(struct dplls const *dpll_data)
{
#ifdef CONFIG_SYS_OMAP_ABE_SYSCK
	u32 sysclk_ind = get_sys_clk_index();
	return &dpll_data->abe[sysclk_ind];
#else
	return dpll_data->abe;
#endif
}

static const struct dpll_params *get_ddr_dpll_params
			(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();

	if (!dpll_data->ddr)
		return NULL;
	return &dpll_data->ddr[sysclk_ind];
}

#ifdef CONFIG_DRIVER_TI_CPSW
static const struct dpll_params *get_gmac_dpll_params
			(struct dplls const *dpll_data)
{
	u32 sysclk_ind = get_sys_clk_index();

	if (!dpll_data->gmac)
		return NULL;
	return &dpll_data->gmac[sysclk_ind];
}
#endif

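/*
 * Program a DPLL from a dpll_params entry: if the ROM code (configuration
 * header) has already locked it with the requested M/N, only the
 * post-dividers are reprogrammed; otherwise the DPLL is put in bypass,
 * M/N and the post-dividers are written, and the DPLL is optionally
 * relocked.
 */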
static void do_setup_dpll(u32 const base, const struct dpll_params *params,
			  u8 lock, char *dpll)
{
	u32 temp, M, N;
	struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

	if (!params)
		return;

	temp = readl(&dpll_regs->cm_clksel_dpll);

	if (check_for_lock(base)) {
		/*
		 * The DPLL has already been locked by ROM code using CH.
		 * Check if M and N match the ideal nominal OPP values.
		 * If they match, skip the relock and only refresh the
		 * post-dividers; otherwise relock with the new values.
		 */
		M = (temp & CM_CLKSEL_DPLL_M_MASK) >> CM_CLKSEL_DPLL_M_SHIFT;
		N = (temp & CM_CLKSEL_DPLL_N_MASK) >> CM_CLKSEL_DPLL_N_SHIFT;
		if ((M != (params->m)) || (N != (params->n))) {
			debug("\n %s DPLL locked, but not for ideal M = %d, "
			      "N = %d values, current values are M = %d, "
			      "N = %d", dpll, params->m, params->n,
			      M, N);
		} else {
			/* DPLL already locked with ideal nominal OPP values */
			debug("\n %s DPLL already locked with ideal "
			      "nominal opp values", dpll);
			bypass_dpll(base);
			goto setup_post_dividers;
		}
	}

	/* Put the DPLL in bypass before reprogramming it */
	bypass_dpll(base);

	/* Set M and N */
	temp &= ~CM_CLKSEL_DPLL_M_MASK;
	temp |= (params->m << CM_CLKSEL_DPLL_M_SHIFT) & CM_CLKSEL_DPLL_M_MASK;

	temp &= ~CM_CLKSEL_DPLL_N_MASK;
	temp |= (params->n << CM_CLKSEL_DPLL_N_SHIFT) & CM_CLKSEL_DPLL_N_MASK;

	writel(temp, &dpll_regs->cm_clksel_dpll);

setup_post_dividers:
	setup_post_dividers(base, params);

	/* Lock the DPLL */
	if (lock)
		do_lock_dpll(base);

	/* Wait till the DPLL locks */
	if (lock)
		wait_for_lock(base);
}

u32 omap_ddr_clk(void)
{
	u32 ddr_clk, sys_clk_khz, omap_rev, divider;
	const struct dpll_params *core_dpll_params;

	omap_rev = omap_revision();
	sys_clk_khz = get_sys_clk_freq() / 1000;

	core_dpll_params = get_core_dpll_params(*dplls_data);

	debug("sys_clk %d\n ", sys_clk_khz * 1000);

	/* Find the Core DPLL locked frequency first */
	ddr_clk = sys_clk_khz * 2 * core_dpll_params->m /
			(core_dpll_params->n + 1);

	if (omap_rev < OMAP5430_ES1_0) {
		/*
		 * DDR frequency is PHY_ROOT_CLK/2
		 * PHY_ROOT_CLK = Fdpll/2/M2
		 */
		divider = 4;
	} else {
		/*
		 * DDR frequency is PHY_ROOT_CLK
		 * PHY_ROOT_CLK = Fdpll/2/M2
		 */
		divider = 2;
	}

	ddr_clk = ddr_clk / divider / core_dpll_params->m2;
	ddr_clk *= 1000;
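	/*
	 * Worked example with hypothetical parameters (not taken from any
	 * specific dpll_params table): sys_clk_khz = 19200, m = 665, n = 11,
	 * m2 = 2 on an OMAP5-class part (divider = 2):
	 *   ddr_clk = 19200 * 2 * 665 / 12 = 2128000 kHz
	 *   ddr_clk = 2128000 / 2 / 2      = 532000 kHz -> 532 MHz
	 */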
	debug("ddr_clk %d\n ", ddr_clk);

	return ddr_clk;
}

/*
 * Lock the MPU DPLL.
 *
 * The resulting MPU frequency depends on the silicon revision and on the
 * dpll_params table selected for the detected sys_clk; DCC is left
 * disabled here.
 */
void configure_mpu_dpll(void)
{
	const struct dpll_params *params;
	struct dpll_regs *mpu_dpll_regs;
	u32 omap_rev;

	omap_rev = omap_revision();

	/*
	 * DCC and clock divider settings for OMAP4460 class parts.
	 * DCC is only required above a certain MPU frequency, which is not
	 * used here: clear CLKSEL_EMIF_DIV_MODE, set CLKSEL_ABE_DIV_MODE
	 * and disable DCC before locking the DPLL.
	 */
	if ((omap_rev >= OMAP4460_ES1_0) && (omap_rev < OMAP5430_ES1_0)) {
		mpu_dpll_regs =
			(struct dpll_regs *)((*prcm)->cm_clkmode_dpll_mpu);
		bypass_dpll((*prcm)->cm_clkmode_dpll_mpu);
		clrbits_le32((*prcm)->cm_mpu_mpu_clkctrl,
			     MPU_CLKCTRL_CLKSEL_EMIF_DIV_MODE_MASK);
		setbits_le32((*prcm)->cm_mpu_mpu_clkctrl,
			     MPU_CLKCTRL_CLKSEL_ABE_DIV_MODE_MASK);
		clrbits_le32(&mpu_dpll_regs->cm_clksel_dpll,
			     CM_CLKSEL_DCC_EN_MASK);
	}

	params = get_mpu_dpll_params(*dplls_data);

	do_setup_dpll((*prcm)->cm_clkmode_dpll_mpu, params, DPLL_LOCK, "mpu");
	debug("MPU DPLL locked\n");
}

#if defined(CONFIG_USB_EHCI_OMAP) || defined(CONFIG_USB_XHCI_OMAP) || \
	defined(CONFIG_USB_MUSB_OMAP2PLUS)
static void setup_usb_dpll(void)
{
	const struct dpll_params *params;
	u32 sys_clk_khz, sd_div, num, den;

	sys_clk_khz = get_sys_clk_freq() / 1000;

	/*
	 * The USB DPLL is of type J. DPLL_SD_DIV must be set for jitter
	 * correction:
	 *   DPLL_SD_DIV = CEILING([DPLL_MULT / (DPLL_DIV + 1)] * CLKINP / 250)
	 * where CLKINP is sys_clk in MHz.
	 * Use CLKINP in kHz and scale the denominator accordingly, so that
	 * we have enough accuracy while avoiding overflow.
	 */
	params = get_usb_dpll_params(*dplls_data);
	num = params->m * sys_clk_khz;
	den = (params->n + 1) * 250 * 1000;
	num += den - 1;
	sd_div = num / den;
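	/*
	 * Worked example with hypothetical values (m = 400, n = 0,
	 * sys_clk_khz = 19200):
	 *   num = 400 * 19200 = 7680000
	 *   den = 1 * 250 * 1000 = 250000
	 *   sd_div = (7680000 + 249999) / 250000 = 31 (= ceil(30.72))
	 */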
	clrsetbits_le32((*prcm)->cm_clksel_dpll_usb,
			CM_CLKSEL_DPLL_DPLL_SD_DIV_MASK,
			sd_div << CM_CLKSEL_DPLL_DPLL_SD_DIV_SHIFT);

	/* Now setup the DPLL with the regular function */
	do_setup_dpll((*prcm)->cm_clkmode_dpll_usb, params, DPLL_LOCK, "usb");
}
#endif

static void setup_dplls(void)
{
	u32 temp;
	const struct dpll_params *params;
	struct emif_reg_struct *emif = (struct emif_reg_struct *)EMIF1_BASE;

	debug("setup_dplls\n");

	/* CORE dpll */
	params = get_core_dpll_params(*dplls_data);

	/*
	 * For LPDDR2, do not lock the core DPLL now, just set it up.
	 * It is locked later, after EMIF setup, using the FREQ_UPDATE
	 * method (freq_update_core()).
	 */
	if (emif_sdram_type(readl(&emif->emif_sdram_config)) ==
	    EMIF_SDRAM_TYPE_LPDDR2)
		do_setup_dpll((*prcm)->cm_clkmode_dpll_core, params,
			      DPLL_NO_LOCK, "core");
	else
		do_setup_dpll((*prcm)->cm_clkmode_dpll_core, params,
			      DPLL_LOCK, "core");

	temp = (CLKSEL_CORE_X2_DIV_1 << CLKSEL_CORE_SHIFT) |
	    (CLKSEL_L3_CORE_DIV_2 << CLKSEL_L3_SHIFT) |
	    (CLKSEL_L4_L3_DIV_2 << CLKSEL_L4_SHIFT);
	writel(temp, (*prcm)->cm_clksel_core);
	debug("Core DPLL configured\n");

	/* Lock PER dpll */
	params = get_per_dpll_params(*dplls_data);
	do_setup_dpll((*prcm)->cm_clkmode_dpll_per,
		      params, DPLL_LOCK, "per");
	debug("PER DPLL locked\n");

	/* MPU dpll */
	configure_mpu_dpll();

#if defined(CONFIG_USB_EHCI_OMAP) || defined(CONFIG_USB_XHCI_OMAP) || \
	defined(CONFIG_USB_MUSB_OMAP2PLUS)
	setup_usb_dpll();
#endif
	params = get_ddr_dpll_params(*dplls_data);
	do_setup_dpll((*prcm)->cm_clkmode_dpll_ddrphy,
		      params, DPLL_LOCK, "ddr");

#ifdef CONFIG_DRIVER_TI_CPSW
	params = get_gmac_dpll_params(*dplls_data);
	do_setup_dpll((*prcm)->cm_clkmode_dpll_gmac, params,
		      DPLL_LOCK, "gmac");
#endif
}

u32 get_offset_code(u32 volt_offset, struct pmic_data *pmic)
{
	u32 offset_code;

	volt_offset -= pmic->base_offset;

	offset_code = (volt_offset + pmic->step - 1) / pmic->step;
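	/*
	 * The division above rounds up. For example, with hypothetical
	 * values volt_offset = 1063000 uV, base_offset = 0, step = 10000 uV:
	 *   offset_code = (1063000 + 9999) / 10000 = 107 (= ceil(106.3))
	 */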

	/*
	 * start_code is the lowest code the PMIC accepts for this rail;
	 * anything below it (typically including code 0, which switches
	 * the SMPS off) must never be programmed from here.
	 */
	return offset_code + pmic->start_code;
}

void do_scale_vcore(u32 vcore_reg, u32 volt_mv, struct pmic_data *pmic)
{
	u32 offset_code;
	u32 offset = volt_mv;
	int ret = 0;

	if (!volt_mv)
		return;

	pmic->pmic_bus_init();

	if (pmic->gpio_en)
		ret = gpio_request(pmic->gpio, "PMIC_GPIO");

	if (ret < 0) {
		printf("%s: gpio %d request failed %d\n", __func__,
		       pmic->gpio, ret);
		return;
	}

	/* Drive the PMIC control GPIO low while the new voltage is set up */
	if (pmic->gpio_en)
		gpio_direction_output(pmic->gpio, 0);

	/* Convert to uV for better accuracy in the calculations */
	offset *= 1000;

	offset_code = get_offset_code(offset, pmic);

	debug("do_scale_vcore: volt - %d offset_code - 0x%x\n", volt_mv,
	      offset_code);

	if (pmic->pmic_write(pmic->i2c_slave_addr, vcore_reg, offset_code))
		printf("Scaling voltage failed for 0x%x\n", vcore_reg);
	if (pmic->gpio_en)
		gpio_direction_output(pmic->gpio, 1);
}

static u32 optimize_vcore_voltage(struct volts const *v)
{
	u32 val;

	if (!v->value)
		return 0;
	if (!v->efuse.reg)
		return v->value;

	switch (v->efuse.reg_bits) {
	case 16:
		val = readw(v->efuse.reg);
		break;
	case 32:
		val = readl(v->efuse.reg);
		break;
	default:
		printf("Error: efuse 0x%08x bits=%d unknown\n",
		       v->efuse.reg, v->efuse.reg_bits);
		return v->value;
	}

	if (!val) {
		printf("Error: efuse 0x%08x bits=%d val=0, using %d\n",
		       v->efuse.reg, v->efuse.reg_bits, v->value);
		return v->value;
	}

	debug("%s:efuse 0x%08x bits=%d Vnom=%d, using efuse value %d\n",
	      __func__, v->efuse.reg, v->efuse.reg_bits, v->value, val);
	return val;
}

#ifdef CONFIG_IODELAY_RECALIBRATION
void __weak recalibrate_iodelay(void)
{
}
#endif

/*
 * Setup the voltages for the SoC power rails.
 *
 * Start from the nominal values given in the board's vcores_data table,
 * replace each one by the eFuse-trimmed value when a valid eFuse register
 * is defined (optimize_vcore_voltage()), and make sure rails sharing the
 * same PMIC SMPS are programmed only once, with the highest of the
 * requested voltages.
 */
void scale_vcores(struct vcores_data const *vcores)
{
	int i;
	struct volts *pv = (struct volts *)vcores;
	struct volts *px;

	for (i = 0; i < (sizeof(struct vcores_data) / sizeof(struct volts));
	     i++) {
		debug("%d -> ", pv->value);
		if (pv->value) {
			/* Handle non-empty members only */
			pv->value = optimize_vcore_voltage(pv);
			px = (struct volts *)vcores;
			while (px < pv) {
				/*
				 * Scan the already handled non-empty members
				 * to see if this rail shares a supply with
				 * one of them. If so, keep the higher of the
				 * two voltages on the earlier member and do
				 * not program this one again.
				 */
				if (px->value) {
					if ((pv->pmic->i2c_slave_addr ==
					     px->pmic->i2c_slave_addr) &&
					    (pv->addr == px->addr)) {
						/* Same PMIC, same SMPS */
						if (pv->value > px->value)
							px->value = pv->value;

						pv->value = 0;
					}
				}
				px++;
			}
		}
		debug("%d\n", pv->value);
		pv++;
	}

	debug("cor: %d\n", vcores->core.value);
	do_scale_vcore(vcores->core.addr, vcores->core.value,
		       vcores->core.pmic);
	/*
	 * IO delay recalibration should be done immediately after
	 * adjusting the AVS voltage for VDD_CORE_L.
	 * Boards override recalibrate_iodelay() with the proper mux,
	 * virtual and manual mode configurations.
	 */
#ifdef CONFIG_IODELAY_RECALIBRATION
	recalibrate_iodelay();
#endif

	debug("mpu: %d\n", vcores->mpu.value);
	do_scale_vcore(vcores->mpu.addr, vcores->mpu.value, vcores->mpu.pmic);

	abb_setup(vcores->mpu.efuse.reg,
		  (*ctrl)->control_wkup_ldovbb_mpu_voltage_ctrl,
		  (*prcm)->prm_abbldo_mpu_setup,
		  (*prcm)->prm_abbldo_mpu_ctrl,
		  (*prcm)->prm_irqstatus_mpu_2,
		  vcores->mpu.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	debug("mm: %d\n", vcores->mm.value);
	do_scale_vcore(vcores->mm.addr, vcores->mm.value, vcores->mm.pmic);

	abb_setup(vcores->mm.efuse.reg,
		  (*ctrl)->control_wkup_ldovbb_mm_voltage_ctrl,
		  (*prcm)->prm_abbldo_mm_setup,
		  (*prcm)->prm_abbldo_mm_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->mm.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	debug("gpu: %d\n", vcores->gpu.value);
	do_scale_vcore(vcores->gpu.addr, vcores->gpu.value, vcores->gpu.pmic);

	abb_setup(vcores->gpu.efuse.reg,
		  (*ctrl)->control_wkup_ldovbb_gpu_voltage_ctrl,
		  (*prcm)->prm_abbldo_gpu_setup,
		  (*prcm)->prm_abbldo_gpu_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->gpu.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	debug("eve: %d\n", vcores->eve.value);
	do_scale_vcore(vcores->eve.addr, vcores->eve.value, vcores->eve.pmic);

	abb_setup(vcores->eve.efuse.reg,
		  (*ctrl)->control_wkup_ldovbb_eve_voltage_ctrl,
		  (*prcm)->prm_abbldo_eve_setup,
		  (*prcm)->prm_abbldo_eve_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->eve.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	debug("iva: %d\n", vcores->iva.value);
	do_scale_vcore(vcores->iva.addr, vcores->iva.value, vcores->iva.pmic);

	abb_setup(vcores->iva.efuse.reg,
		  (*ctrl)->control_wkup_ldovbb_iva_voltage_ctrl,
		  (*prcm)->prm_abbldo_iva_setup,
		  (*prcm)->prm_abbldo_iva_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->iva.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);
}

static inline void enable_clock_domain(u32 const clkctrl_reg, u32 enable_mode)
{
	clrsetbits_le32(clkctrl_reg, CD_CLKCTRL_CLKTRCTRL_MASK,
			enable_mode << CD_CLKCTRL_CLKTRCTRL_SHIFT);
	debug("Enable clock domain - %x\n", clkctrl_reg);
}

static inline void disable_clock_domain(u32 const clkctrl_reg)
{
	clrsetbits_le32(clkctrl_reg, CD_CLKCTRL_CLKTRCTRL_MASK,
			CD_CLKCTRL_CLKTRCTRL_SW_SLEEP <<
			CD_CLKCTRL_CLKTRCTRL_SHIFT);
	debug("Disable clock domain - %x\n", clkctrl_reg);
}

static inline void wait_for_clk_enable(u32 clkctrl_addr)
{
	u32 clkctrl, idlest = MODULE_CLKCTRL_IDLEST_DISABLED;
	u32 bound = LDELAY;

	while ((idlest == MODULE_CLKCTRL_IDLEST_DISABLED) ||
	       (idlest == MODULE_CLKCTRL_IDLEST_TRANSITIONING)) {
		clkctrl = readl(clkctrl_addr);
		idlest = (clkctrl & MODULE_CLKCTRL_IDLEST_MASK) >>
			 MODULE_CLKCTRL_IDLEST_SHIFT;
		if (--bound == 0) {
			printf("Clock enable failed for 0x%x idlest 0x%x\n",
			       clkctrl_addr, clkctrl);
			return;
		}
	}
}

static inline void enable_clock_module(u32 const clkctrl_addr, u32 enable_mode,
					u32 wait_for_enable)
{
	clrsetbits_le32(clkctrl_addr, MODULE_CLKCTRL_MODULEMODE_MASK,
			enable_mode << MODULE_CLKCTRL_MODULEMODE_SHIFT);
	debug("Enable clock module - %x\n", clkctrl_addr);
	if (wait_for_enable)
		wait_for_clk_enable(clkctrl_addr);
}

static inline void wait_for_clk_disable(u32 clkctrl_addr)
{
	u32 clkctrl, idlest = MODULE_CLKCTRL_IDLEST_FULLY_FUNCTIONAL;
	u32 bound = LDELAY;

	while (idlest != MODULE_CLKCTRL_IDLEST_DISABLED) {
		clkctrl = readl(clkctrl_addr);
		idlest = (clkctrl & MODULE_CLKCTRL_IDLEST_MASK) >>
			 MODULE_CLKCTRL_IDLEST_SHIFT;
		if (--bound == 0) {
			printf("Clock disable failed for 0x%x idlest 0x%x\n",
			       clkctrl_addr, clkctrl);
			return;
		}
	}
}

static inline void disable_clock_module(u32 const clkctrl_addr,
					u32 wait_for_disable)
{
	clrsetbits_le32(clkctrl_addr, MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_DISABLE <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);
	debug("Disable clock module - %x\n", clkctrl_addr);
	if (wait_for_disable)
		wait_for_clk_disable(clkctrl_addr);
}

void freq_update_core(void)
{
	u32 freq_config1 = 0;
	const struct dpll_params *core_dpll_params;
	u32 omap_rev = omap_revision();

	core_dpll_params = get_core_dpll_params(*dplls_data);

	/* Put the EMIF clock domain in SW_WKUP while programming the shadow registers */
	enable_clock_domain((*prcm)->cm_memif_clkstctrl,
			    CD_CLKCTRL_CLKTRCTRL_SW_WKUP);
	wait_for_clk_enable((*prcm)->cm_memif_emif_1_clkctrl);
	wait_for_clk_enable((*prcm)->cm_memif_emif_2_clkctrl);

	freq_config1 = SHADOW_FREQ_CONFIG1_FREQ_UPDATE_MASK |
	    SHADOW_FREQ_CONFIG1_DLL_RESET_MASK;

	freq_config1 |= (DPLL_EN_LOCK << SHADOW_FREQ_CONFIG1_DPLL_EN_SHIFT) &
	    SHADOW_FREQ_CONFIG1_DPLL_EN_MASK;

	freq_config1 |= (core_dpll_params->m2 <<
	    SHADOW_FREQ_CONFIG1_M2_DIV_SHIFT) &
	    SHADOW_FREQ_CONFIG1_M2_DIV_MASK;

	writel(freq_config1, (*prcm)->cm_shadow_freq_config1);
	if (!wait_on_value(SHADOW_FREQ_CONFIG1_FREQ_UPDATE_MASK, 0,
			   (u32 *) (*prcm)->cm_shadow_freq_config1, LDELAY)) {
		puts("FREQ UPDATE procedure failed!!");
		hang();
	}

	/*
	 * Putting EMIF in HW_AUTO is seen to be causing issues with
	 * the EMIF clocks and the master DLL. Keep EMIF in SW_WKUP
	 * on OMAP5430 ES1.0 silicon.
	 */
	if (omap_rev != OMAP5430_ES1_0) {
		/* Put the EMIF clock domain back in HW_AUTO mode */
		enable_clock_domain((*prcm)->cm_memif_clkstctrl,
				    CD_CLKCTRL_CLKTRCTRL_HW_AUTO);
		wait_for_clk_enable((*prcm)->cm_memif_emif_1_clkctrl);
		wait_for_clk_enable((*prcm)->cm_memif_emif_2_clkctrl);
	}
}

void bypass_dpll(u32 const base)
{
	do_bypass_dpll(base);
	wait_for_bypass(base);
}

void lock_dpll(u32 const base)
{
	do_lock_dpll(base);
	wait_for_lock(base);
}

static void setup_clocks_for_console(void)
{
	/* Do not add any spl_debug prints in this function */
	clrsetbits_le32((*prcm)->cm_l4per_clkstctrl, CD_CLKCTRL_CLKTRCTRL_MASK,
			CD_CLKCTRL_CLKTRCTRL_SW_WKUP <<
			CD_CLKCTRL_CLKTRCTRL_SHIFT);

	/* Enable all UARTs - the console will be on one of them */
	clrsetbits_le32((*prcm)->cm_l4per_uart1_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_uart2_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_uart3_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_uart4_clkctrl,
			MODULE_CLKCTRL_MODULEMODE_MASK,
			MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
			MODULE_CLKCTRL_MODULEMODE_SHIFT);

	clrsetbits_le32((*prcm)->cm_l4per_clkstctrl, CD_CLKCTRL_CLKTRCTRL_MASK,
			CD_CLKCTRL_CLKTRCTRL_HW_AUTO <<
			CD_CLKCTRL_CLKTRCTRL_SHIFT);
}

void do_enable_clocks(u32 const *clk_domains,
		      u32 const *clk_modules_hw_auto,
		      u32 const *clk_modules_explicit_en,
		      u8 wait_for_enable)
{
	u32 i, max = 100;

	/* Put the clock domains in SW_WKUP mode */
	for (i = 0; (i < max) && clk_domains[i]; i++) {
		enable_clock_domain(clk_domains[i],
				    CD_CLKCTRL_CLKTRCTRL_SW_WKUP);
	}

	/* Clock modules that need to be put in HW_AUTO */
	for (i = 0; (i < max) && clk_modules_hw_auto[i]; i++) {
		enable_clock_module(clk_modules_hw_auto[i],
				    MODULE_CLKCTRL_MODULEMODE_HW_AUTO,
				    wait_for_enable);
	}

	/* Clock modules that need to be put in SW_EXPLICIT_EN mode */
	for (i = 0; (i < max) && clk_modules_explicit_en[i]; i++) {
		enable_clock_module(clk_modules_explicit_en[i],
				    MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN,
				    wait_for_enable);
	}

	/* Put the clock domains in HW_AUTO mode now */
	for (i = 0; (i < max) && clk_domains[i]; i++) {
		enable_clock_domain(clk_domains[i],
				    CD_CLKCTRL_CLKTRCTRL_HW_AUTO);
	}
}
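
/*
 * Usage sketch (hypothetical grouping; register names are the prcm
 * accessors already used elsewhere in this file): callers pass
 * zero-terminated arrays of CM_* register addresses, 100 entries max:
 *
 *	u32 const clk_domains[] = {
 *		(*prcm)->cm_l4per_clkstctrl,
 *		0
 *	};
 *	u32 const clk_modules_hw_auto[] = {
 *		(*prcm)->cm_memif_emif_1_clkctrl,
 *		0
 *	};
 *	u32 const clk_modules_explicit_en[] = {
 *		(*prcm)->cm_l4per_uart3_clkctrl,
 *		0
 *	};
 *
 *	do_enable_clocks(clk_domains, clk_modules_hw_auto,
 *			 clk_modules_explicit_en, 1);
 */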

void do_disable_clocks(u32 const *clk_domains,
		       u32 const *clk_modules_disable,
		       u8 wait_for_disable)
{
	u32 i, max = 100;

	/* Clock modules that need to be put in SW_DISABLE */
	for (i = 0; (i < max) && clk_modules_disable[i]; i++)
		disable_clock_module(clk_modules_disable[i],
				     wait_for_disable);

	/* Put the clock domains in SW_SLEEP mode */
	for (i = 0; (i < max) && clk_domains[i]; i++)
		disable_clock_domain(clk_domains[i]);
}

/*
 * setup_early_clocks() - Set up the clocks needed very early in boot:
 * the console UART clocks, the basic module clocks and the timer.
 */
void setup_early_clocks(void)
{
	switch (omap_hw_init_context()) {
	case OMAP_INIT_CONTEXT_SPL:
	case OMAP_INIT_CONTEXT_UBOOT_FROM_NOR:
	case OMAP_INIT_CONTEXT_UBOOT_AFTER_CH:
		setup_clocks_for_console();
		enable_basic_clocks();
		timer_init();
		break;
	}
}

void prcm_init(void)
{
	switch (omap_hw_init_context()) {
	case OMAP_INIT_CONTEXT_SPL:
	case OMAP_INIT_CONTEXT_UBOOT_FROM_NOR:
	case OMAP_INIT_CONTEXT_UBOOT_AFTER_CH:
		scale_vcores(*omap_vcores);
		setup_dplls();
		setup_warmreset_time();
		break;
	default:
		break;
	}

	if (OMAP_INIT_CONTEXT_SPL != omap_hw_init_context())
		enable_basic_uboot_clocks();
}

void gpi2c_init(void)
{
	static int gpi2c = 1;

	if (gpi2c) {
		i2c_init(CONFIG_SYS_OMAP24_I2C_SPEED,
			 CONFIG_SYS_OMAP24_I2C_SLAVE);
		gpi2c = 0;
	}
}