uboot/arch/arm/cpu/armv7/omap-common/clocks-common.c
/*
 *
 * Clock initialization for OMAP4
 *
 * (C) Copyright 2010
 * Texas Instruments, <www.ti.com>
 *
 * Aneesh V <aneesh@ti.com>
 *
 * Based on previous work by:
 *      Santosh Shilimkar <santosh.shilimkar@ti.com>
 *      Rajendra Nayak <rnayak@ti.com>
 *
 * SPDX-License-Identifier:     GPL-2.0+
 */
#include <common.h>
#include <i2c.h>
#include <asm/omap_common.h>
#include <asm/gpio.h>
#include <asm/arch/clock.h>
#include <asm/arch/sys_proto.h>
#include <asm/utils.h>
#include <asm/omap_gpio.h>
#include <asm/emif.h>

#ifndef CONFIG_SPL_BUILD
/*
 * printing to console doesn't work unless
 * this code is executed from SPL
 */
#define printf(fmt, args...)
#define puts(s)
#endif

const u32 sys_clk_array[8] = {
        12000000,              /* 12 MHz */
        20000000,              /* 20 MHz */
        16800000,              /* 16.8 MHz */
        19200000,              /* 19.2 MHz */
        26000000,              /* 26 MHz */
        27000000,              /* 27 MHz */
        38400000,              /* 38.4 MHz */
};

static inline u32 __get_sys_clk_index(void)
{
        s8 ind;
        /*
         * For ES1 the ROM code calibration of sys clock is not reliable
         * due to a hw issue, so use a hard-coded value. If this value is
         * not correct for any board, override this function in the board
         * file. From ES2.0 onwards you get this information from
         * CM_SYS_CLKSEL.
         */
        if (omap_revision() == OMAP4430_ES1_0)
                ind = OMAP_SYS_CLK_IND_38_4_MHZ;
        else {
                /* SYS_CLKSEL - 1 to match the dpll param array indices */
                ind = (readl((*prcm)->cm_sys_clksel) &
                        CM_SYS_CLKSEL_SYS_CLKSEL_MASK) - 1;
        }
        return ind;
}

u32 get_sys_clk_index(void)
        __attribute__ ((weak, alias("__get_sys_clk_index")));

u32 get_sys_clk_freq(void)
{
        u8 index = get_sys_clk_index();
        return sys_clk_array[index];
}
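
/*
 * Worked example (illustrative, derived from the table and index code
 * above): a board whose CM_SYS_CLKSEL field reads 0x4 yields index
 * 4 - 1 = 3, so get_sys_clk_freq() returns sys_clk_array[3] = 19200000 Hz
 * (19.2 MHz); a field value of 0x7 likewise maps to index 6, i.e. 38.4 MHz.
 */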

void setup_post_dividers(u32 const base, const struct dpll_params *params)
{
        struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

        /* Setup post-dividers */
        if (params->m2 >= 0)
                writel(params->m2, &dpll_regs->cm_div_m2_dpll);
        if (params->m3 >= 0)
                writel(params->m3, &dpll_regs->cm_div_m3_dpll);
        if (params->m4_h11 >= 0)
                writel(params->m4_h11, &dpll_regs->cm_div_m4_h11_dpll);
        if (params->m5_h12 >= 0)
                writel(params->m5_h12, &dpll_regs->cm_div_m5_h12_dpll);
        if (params->m6_h13 >= 0)
                writel(params->m6_h13, &dpll_regs->cm_div_m6_h13_dpll);
        if (params->m7_h14 >= 0)
                writel(params->m7_h14, &dpll_regs->cm_div_m7_h14_dpll);
        if (params->h21 >= 0)
                writel(params->h21, &dpll_regs->cm_div_h21_dpll);
        if (params->h22 >= 0)
                writel(params->h22, &dpll_regs->cm_div_h22_dpll);
        if (params->h23 >= 0)
                writel(params->h23, &dpll_regs->cm_div_h23_dpll);
        if (params->h24 >= 0)
                writel(params->h24, &dpll_regs->cm_div_h24_dpll);
}
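
/*
 * Note on the per-SoC dpll_params tables consumed above: post-divider
 * fields set to a negative value (e.g. -1) fail the ">= 0" checks and are
 * skipped, leaving the corresponding M2/M3/Hnn dividers at their reset or
 * ROM-code-programmed values.
 */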

static inline void do_bypass_dpll(u32 const base)
{
        struct dpll_regs *dpll_regs = (struct dpll_regs *)base;

        clrsetbits_le32(&dpll_regs->cm_clkmode_dpll,
                        CM_CLKMODE_DPLL_DPLL_EN_MASK,
                        DPLL_EN_FAST_RELOCK_BYPASS <<
                        CM_CLKMODE_DPLL_EN_SHIFT);
}

static inline void wait_for_bypass(u32 const base)
{
        struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

        if (!wait_on_value(ST_DPLL_CLK_MASK, 0, &dpll_regs->cm_idlest_dpll,
                                LDELAY)) {
                printf("Bypassing DPLL failed %x\n", base);
        }
}

static inline void do_lock_dpll(u32 const base)
{
        struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

        clrsetbits_le32(&dpll_regs->cm_clkmode_dpll,
                        CM_CLKMODE_DPLL_DPLL_EN_MASK,
                        DPLL_EN_LOCK << CM_CLKMODE_DPLL_EN_SHIFT);
}

static inline void wait_for_lock(u32 const base)
{
        struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

        if (!wait_on_value(ST_DPLL_CLK_MASK, ST_DPLL_CLK_MASK,
                &dpll_regs->cm_idlest_dpll, LDELAY)) {
                printf("DPLL locking failed for %x\n", base);
                hang();
        }
}

inline u32 check_for_lock(u32 const base)
{
        struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;
        u32 lock = readl(&dpll_regs->cm_idlest_dpll) & ST_DPLL_CLK_MASK;

        return lock;
}

const struct dpll_params *get_mpu_dpll_params(struct dplls const *dpll_data)
{
        u32 sysclk_ind = get_sys_clk_index();
        return &dpll_data->mpu[sysclk_ind];
}

const struct dpll_params *get_core_dpll_params(struct dplls const *dpll_data)
{
        u32 sysclk_ind = get_sys_clk_index();
        return &dpll_data->core[sysclk_ind];
}

const struct dpll_params *get_per_dpll_params(struct dplls const *dpll_data)
{
        u32 sysclk_ind = get_sys_clk_index();
        return &dpll_data->per[sysclk_ind];
}

const struct dpll_params *get_iva_dpll_params(struct dplls const *dpll_data)
{
        u32 sysclk_ind = get_sys_clk_index();
        return &dpll_data->iva[sysclk_ind];
}

const struct dpll_params *get_usb_dpll_params(struct dplls const *dpll_data)
{
        u32 sysclk_ind = get_sys_clk_index();
        return &dpll_data->usb[sysclk_ind];
}

const struct dpll_params *get_abe_dpll_params(struct dplls const *dpll_data)
{
#ifdef CONFIG_SYS_OMAP_ABE_SYSCK
        u32 sysclk_ind = get_sys_clk_index();
        return &dpll_data->abe[sysclk_ind];
#else
        return dpll_data->abe;
#endif
}

static const struct dpll_params *get_ddr_dpll_params
                        (struct dplls const *dpll_data)
{
        u32 sysclk_ind = get_sys_clk_index();

        if (!dpll_data->ddr)
                return NULL;
        return &dpll_data->ddr[sysclk_ind];
}

#ifdef CONFIG_DRIVER_TI_CPSW
static const struct dpll_params *get_gmac_dpll_params
                        (struct dplls const *dpll_data)
{
        u32 sysclk_ind = get_sys_clk_index();

        if (!dpll_data->gmac)
                return NULL;
        return &dpll_data->gmac[sysclk_ind];
}
#endif

static void do_setup_dpll(u32 const base, const struct dpll_params *params,
                                u8 lock, char *dpll)
{
        u32 temp, M, N;
        struct dpll_regs *const dpll_regs = (struct dpll_regs *)base;

        if (!params)
                return;

        temp = readl(&dpll_regs->cm_clksel_dpll);

        if (check_for_lock(base)) {
                /*
                 * The DPLL has already been locked by ROM code using CH.
                 * Check if M, N match the ideal nominal opp values.
                 * If they match, skip the rest; otherwise relock.
                 */
                M = (temp & CM_CLKSEL_DPLL_M_MASK) >> CM_CLKSEL_DPLL_M_SHIFT;
                N = (temp & CM_CLKSEL_DPLL_N_MASK) >> CM_CLKSEL_DPLL_N_SHIFT;
                if ((M != (params->m)) || (N != (params->n))) {
                        debug("\n %s DPLL locked, but not for ideal M = %d, "
                                "N = %d values, current values are M = %d, "
                                "N = %d", dpll, params->m, params->n,
                                M, N);
                } else {
                        /* DPLL locked with ideal values for nominal opps. */
                        debug("\n %s DPLL already locked with ideal "
                                                "nominal opp values", dpll);
                        goto setup_post_dividers;
                }
        }

        bypass_dpll(base);

        /* Set M & N */
        temp &= ~CM_CLKSEL_DPLL_M_MASK;
        temp |= (params->m << CM_CLKSEL_DPLL_M_SHIFT) & CM_CLKSEL_DPLL_M_MASK;

        temp &= ~CM_CLKSEL_DPLL_N_MASK;
        temp |= (params->n << CM_CLKSEL_DPLL_N_SHIFT) & CM_CLKSEL_DPLL_N_MASK;

        writel(temp, &dpll_regs->cm_clksel_dpll);

        /* Lock */
        if (lock)
                do_lock_dpll(base);

setup_post_dividers:
        setup_post_dividers(base, params);

        /* Wait till the DPLL locks */
        if (lock)
                wait_for_lock(base);
}

u32 omap_ddr_clk(void)
{
        u32 ddr_clk, sys_clk_khz, omap_rev, divider;
        const struct dpll_params *core_dpll_params;

        omap_rev = omap_revision();
        sys_clk_khz = get_sys_clk_freq() / 1000;

        core_dpll_params = get_core_dpll_params(*dplls_data);

        debug("sys_clk %d\n ", sys_clk_khz * 1000);

        /* Find Core DPLL locked frequency first */
        ddr_clk = sys_clk_khz * 2 * core_dpll_params->m /
                        (core_dpll_params->n + 1);

        if (omap_rev < OMAP5430_ES1_0) {
                /*
                 * DDR frequency is PHY_ROOT_CLK/2
                 * PHY_ROOT_CLK = Fdpll/2/M2
                 */
                divider = 4;
        } else {
                /*
                 * DDR frequency is PHY_ROOT_CLK
                 * PHY_ROOT_CLK = Fdpll/2/M2
                 */
                divider = 2;
        }

        ddr_clk = ddr_clk / divider / core_dpll_params->m2;
        ddr_clk *= 1000;        /* convert to Hz */
        debug("ddr_clk %d\n ", ddr_clk);

        return ddr_clk;
}
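
/*
 * Worked example for the arithmetic above (illustrative parameter values,
 * not necessarily the ones in the per-SoC tables): with a 38.4 MHz sys
 * clock and hypothetical core DPLL settings M = 125, N = 5, M2 = 1, the
 * locked (X2) frequency is 38400 * 2 * 125 / (5 + 1) = 1600000 kHz. On an
 * OMAP4-class device (divider = 4) the resulting DDR clock is
 * 1600000 / 4 / 1 = 400000 kHz, i.e. 400 MHz.
 */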

/*
 * Lock MPU dpll
 *
 * Resulting MPU frequencies:
 * 4430 ES1.0   : 600 MHz
 * 4430 ES2.x   : 792 MHz (OPP Turbo)
 * 4460         : 920 MHz (OPP Turbo) - DCC disabled
 */
void configure_mpu_dpll(void)
{
        const struct dpll_params *params;
        struct dpll_regs *mpu_dpll_regs;
        u32 omap_rev;
        omap_rev = omap_revision();

        /*
         * DCC and clock divider settings for 4460.
         * DCC is required only above a certain frequency:
         * above 1 GHz on the 4460, above 1.4 GHz on the 5430.
         */
        if ((omap_rev >= OMAP4460_ES1_0) && (omap_rev < OMAP5430_ES1_0)) {
                mpu_dpll_regs =
                        (struct dpll_regs *)((*prcm)->cm_clkmode_dpll_mpu);
                bypass_dpll((*prcm)->cm_clkmode_dpll_mpu);
                clrbits_le32((*prcm)->cm_mpu_mpu_clkctrl,
                        MPU_CLKCTRL_CLKSEL_EMIF_DIV_MODE_MASK);
                setbits_le32((*prcm)->cm_mpu_mpu_clkctrl,
                        MPU_CLKCTRL_CLKSEL_ABE_DIV_MODE_MASK);
                clrbits_le32(&mpu_dpll_regs->cm_clksel_dpll,
                        CM_CLKSEL_DCC_EN_MASK);
        }

        params = get_mpu_dpll_params(*dplls_data);

        do_setup_dpll((*prcm)->cm_clkmode_dpll_mpu, params, DPLL_LOCK, "mpu");
        debug("MPU DPLL locked\n");
}

#if defined(CONFIG_USB_EHCI_OMAP) || defined(CONFIG_USB_XHCI_OMAP)
static void setup_usb_dpll(void)
{
        const struct dpll_params *params;
        u32 sys_clk_khz, sd_div, num, den;

        sys_clk_khz = get_sys_clk_freq() / 1000;
        /*
         * USB:
         * USB dpll is J-type. Need to set DPLL_SD_DIV for jitter correction
         * DPLL_SD_DIV = CEILING ([DPLL_MULT/(DPLL_DIV+1)] * CLKINP / 250)
         *      - where CLKINP is sys_clk in MHz
         * Use CLKINP in KHz and adjust the denominator accordingly so
         * that we have enough accuracy and at the same time no overflow
         */
        params = get_usb_dpll_params(*dplls_data);
        num = params->m * sys_clk_khz;
        den = (params->n + 1) * 250 * 1000;
        num += den - 1;
        sd_div = num / den;
        clrsetbits_le32((*prcm)->cm_clksel_dpll_usb,
                        CM_CLKSEL_DPLL_DPLL_SD_DIV_MASK,
                        sd_div << CM_CLKSEL_DPLL_DPLL_SD_DIV_SHIFT);

        /* Now setup the dpll with the regular function */
        do_setup_dpll((*prcm)->cm_clkmode_dpll_usb, params, DPLL_LOCK, "usb");
}
#endif
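
/*
 * Worked example for the ceiling computation above (hypothetical values,
 * not taken from the DPLL tables): with M = 400, N = 15 and a 19.2 MHz sys
 * clock, num = 400 * 19200 = 7680000 and den = 16 * 250 * 1000 = 4000000.
 * Adding den - 1 before the division yields (7680000 + 3999999) / 4000000
 * = 2, which equals CEILING(400 / 16 * 19.2 / 250) = CEILING(1.92) = 2.
 */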

static void setup_dplls(void)
{
        u32 temp;
        const struct dpll_params *params;
        struct emif_reg_struct *emif = (struct emif_reg_struct *)EMIF1_BASE;

        debug("setup_dplls\n");

        /* CORE dpll */
        params = get_core_dpll_params(*dplls_data);     /* default - safest */
        /*
         * Do not lock the core DPLL now. Just set it up.
         * Core DPLL will be locked after setting up EMIF
         * using the FREQ_UPDATE method (freq_update_core())
         */
        if (emif_sdram_type(readl(&emif->emif_sdram_config)) ==
            EMIF_SDRAM_TYPE_LPDDR2)
                do_setup_dpll((*prcm)->cm_clkmode_dpll_core, params,
                              DPLL_NO_LOCK, "core");
        else
                do_setup_dpll((*prcm)->cm_clkmode_dpll_core, params,
                              DPLL_LOCK, "core");
        /* Set the ratios for CORE_CLK, L3_CLK, L4_CLK */
        temp = (CLKSEL_CORE_X2_DIV_1 << CLKSEL_CORE_SHIFT) |
            (CLKSEL_L3_CORE_DIV_2 << CLKSEL_L3_SHIFT) |
            (CLKSEL_L4_L3_DIV_2 << CLKSEL_L4_SHIFT);
        writel(temp, (*prcm)->cm_clksel_core);
        debug("Core DPLL configured\n");

        /* lock PER dpll */
        params = get_per_dpll_params(*dplls_data);
        do_setup_dpll((*prcm)->cm_clkmode_dpll_per,
                        params, DPLL_LOCK, "per");
        debug("PER DPLL locked\n");

        /* MPU dpll */
        configure_mpu_dpll();

#if defined(CONFIG_USB_EHCI_OMAP) || defined(CONFIG_USB_XHCI_OMAP)
        setup_usb_dpll();
#endif
        params = get_ddr_dpll_params(*dplls_data);
        do_setup_dpll((*prcm)->cm_clkmode_dpll_ddrphy,
                      params, DPLL_LOCK, "ddr");

#ifdef CONFIG_DRIVER_TI_CPSW
        params = get_gmac_dpll_params(*dplls_data);
        do_setup_dpll((*prcm)->cm_clkmode_dpll_gmac, params,
                      DPLL_LOCK, "gmac");
#endif
}

u32 get_offset_code(u32 volt_offset, struct pmic_data *pmic)
{
        u32 offset_code;

        volt_offset -= pmic->base_offset;

        offset_code = (volt_offset + pmic->step - 1) / pmic->step;

        /*
         * Offset codes 1-6 all give the base voltage in Palmas
         * Offset code 0 switches OFF the SMPS
         */
        return offset_code + pmic->start_code;
}
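
/*
 * Worked example (hypothetical PMIC parameters; see the per-PMIC pmic_data
 * definitions for the real ones): with base_offset = 500000 uV,
 * step = 10000 uV and start_code = 6, a request for 1060 mV (1060000 uV
 * after the *1000 in do_scale_vcore() below) gives volt_offset = 560000, a
 * rounded-up offset of (560000 + 9999) / 10000 = 56, and a final offset
 * code of 56 + 6 = 62.
 */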

void do_scale_vcore(u32 vcore_reg, u32 volt_mv, struct pmic_data *pmic)
{
        u32 offset_code;
        u32 offset = volt_mv;
#ifndef CONFIG_DRA7XX
        int ret = 0;
#endif

        if (!volt_mv)
                return;

        pmic->pmic_bus_init();
#ifndef CONFIG_DRA7XX
        /* See if we can first get the GPIO if needed */
        if (pmic->gpio_en)
                ret = gpio_request(pmic->gpio, "PMIC_GPIO");

        if (ret < 0) {
                printf("%s: gpio %d request failed %d\n", __func__,
                                                        pmic->gpio, ret);
                return;
        }

        /* Pull the GPIO low to select SET0 register, while we program SET1 */
        if (pmic->gpio_en)
                gpio_direction_output(pmic->gpio, 0);
#endif
        /* convert to uV for better accuracy in the calculations */
        offset *= 1000;

        offset_code = get_offset_code(offset, pmic);

        debug("do_scale_vcore: volt - %d offset_code - 0x%x\n", volt_mv,
                offset_code);

        if (pmic->pmic_write(pmic->i2c_slave_addr, vcore_reg, offset_code))
                printf("Scaling voltage failed for 0x%x\n", vcore_reg);
#ifndef CONFIG_DRA7XX
        if (pmic->gpio_en)
                gpio_direction_output(pmic->gpio, 1);
#endif
}

static u32 optimize_vcore_voltage(struct volts const *v)
{
        u32 val;

        if (!v->value)
                return 0;
        if (!v->efuse.reg)
                return v->value;

        switch (v->efuse.reg_bits) {
        case 16:
                val = readw(v->efuse.reg);
                break;
        case 32:
                val = readl(v->efuse.reg);
                break;
        default:
                printf("Error: efuse 0x%08x bits=%d unknown\n",
                       v->efuse.reg, v->efuse.reg_bits);
                return v->value;
        }

        if (!val) {
                printf("Error: efuse 0x%08x bits=%d val=0, using %d\n",
                       v->efuse.reg, v->efuse.reg_bits, v->value);
                return v->value;
        }

        debug("%s:efuse 0x%08x bits=%d Vnom=%d, using efuse value %d\n",
              __func__, v->efuse.reg, v->efuse.reg_bits, v->value, val);
        return val;
}
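
/*
 * When efuse.reg is populated and the fused value is non-zero, that value
 * (in mV, like v->value) is returned in place of the nominal voltage and
 * becomes the AVS (Adaptive Voltage Scaling) target used by the callers in
 * scale_vcores() below; see the "adjusting AVS voltages" comments there.
 */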

#ifdef CONFIG_IODELAY_RECALIBRATION
void __weak recalibrate_iodelay(void)
{
}
#endif

/*
 * Setup the voltages for the main SoC core power domains.
 * We start with the maximum voltages allowed here, as set in the corresponding
 * vcores_data struct, and then scale (usually down) to the fused values that
 * are retrieved from the SoC. The scaling happens only if the efuse.reg fields
 * are initialised.
 * Rail grouping is supported for the DRA7xx SoCs only, therefore that code is
 * compiled conditionally. Note that this code writes the scaled (or zeroed)
 * values back to the vcores_data struct for eventual reuse. Zero values mean
 * that the corresponding rails are not controlled separately, and are not sent
 * to the PMIC.
 */
void scale_vcores(struct vcores_data const *vcores)
{
#if defined(CONFIG_DRA7XX)
        int i;
        struct volts *pv = (struct volts *)vcores;
        struct volts *px;

        for (i = 0; i < (sizeof(struct vcores_data) / sizeof(struct volts));
             i++) {
                debug("%d -> ", pv->value);
                if (pv->value) {
                        /* Handle non-empty members only */
                        pv->value = optimize_vcore_voltage(pv);
                        px = (struct volts *)vcores;
                        while (px < pv) {
                                /*
                                 * Scan already handled non-empty members to
                                 * see if we have a group and find the max
                                 * voltage, which is set on the first
                                 * occurrence of the particular SMPS; the
                                 * other group voltages are zeroed.
                                 */
                                if (px->value) {
                                        if ((pv->pmic->i2c_slave_addr ==
                                             px->pmic->i2c_slave_addr) &&
                                            (pv->addr == px->addr)) {
                                                /* Same PMIC, same SMPS */
                                                if (pv->value > px->value)
                                                        px->value = pv->value;

                                                pv->value = 0;
                                        }
                                }
                                px++;
                        }
                }
                debug("%d\n", pv->value);
                pv++;
        }

        debug("cor: %d\n", vcores->core.value);
        do_scale_vcore(vcores->core.addr, vcores->core.value,
                       vcores->core.pmic);
        /*
         * IO delay recalibration should be done immediately after
         * adjusting AVS voltages for VDD_CORE_L.
         * Respective boards should call __recalibrate_iodelay()
         * with proper mux, virtual and manual mode configurations.
         */
#ifdef CONFIG_IODELAY_RECALIBRATION
        recalibrate_iodelay();
#endif

        debug("mpu: %d\n", vcores->mpu.value);
        do_scale_vcore(vcores->mpu.addr, vcores->mpu.value, vcores->mpu.pmic);
        /* Configure MPU ABB LDO after scale */
        abb_setup((*ctrl)->control_std_fuse_opp_vdd_mpu_2,
                  (*ctrl)->control_wkup_ldovbb_mpu_voltage_ctrl,
                  (*prcm)->prm_abbldo_mpu_setup,
                  (*prcm)->prm_abbldo_mpu_ctrl,
                  (*prcm)->prm_irqstatus_mpu_2,
                  OMAP_ABB_MPU_TXDONE_MASK,
                  OMAP_ABB_FAST_OPP);

        /* The .mm member is not used for the DRA7xx */

        debug("gpu: %d\n", vcores->gpu.value);
        do_scale_vcore(vcores->gpu.addr, vcores->gpu.value, vcores->gpu.pmic);
        debug("eve: %d\n", vcores->eve.value);
        do_scale_vcore(vcores->eve.addr, vcores->eve.value, vcores->eve.pmic);
        debug("iva: %d\n", vcores->iva.value);
        do_scale_vcore(vcores->iva.addr, vcores->iva.value, vcores->iva.pmic);
        /* Might need udelay(1000) here if debug is enabled to see all prints */
#else
        u32 val;

        val = optimize_vcore_voltage(&vcores->core);
        do_scale_vcore(vcores->core.addr, val, vcores->core.pmic);

        /*
         * IO delay recalibration should be done immediately after
         * adjusting AVS voltages for VDD_CORE_L.
         * Respective boards should call __recalibrate_iodelay()
         * with proper mux, virtual and manual mode configurations.
         */
#ifdef CONFIG_IODELAY_RECALIBRATION
        recalibrate_iodelay();
#endif

        val = optimize_vcore_voltage(&vcores->mpu);
        do_scale_vcore(vcores->mpu.addr, val, vcores->mpu.pmic);

        /* Configure MPU ABB LDO after scale */
        abb_setup((*ctrl)->control_std_fuse_opp_vdd_mpu_2,
                  (*ctrl)->control_wkup_ldovbb_mpu_voltage_ctrl,
                  (*prcm)->prm_abbldo_mpu_setup,
                  (*prcm)->prm_abbldo_mpu_ctrl,
                  (*prcm)->prm_irqstatus_mpu_2,
                  OMAP_ABB_MPU_TXDONE_MASK,
                  OMAP_ABB_FAST_OPP);

        val = optimize_vcore_voltage(&vcores->mm);
        do_scale_vcore(vcores->mm.addr, val, vcores->mm.pmic);

        val = optimize_vcore_voltage(&vcores->gpu);
        do_scale_vcore(vcores->gpu.addr, val, vcores->gpu.pmic);

        val = optimize_vcore_voltage(&vcores->eve);
        do_scale_vcore(vcores->eve.addr, val, vcores->eve.pmic);

        val = optimize_vcore_voltage(&vcores->iva);
        do_scale_vcore(vcores->iva.addr, val, vcores->iva.pmic);
#endif
}
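
/*
 * Illustration of the DRA7xx rail grouping above (hypothetical board
 * configuration): if, say, the eve and iva rails are wired to the same SMPS
 * register address on the same PMIC, the first of the two vcores_data
 * members keeps the higher of the two optimized voltages while the second
 * is zeroed, so only one I2C write is issued for that SMPS and the later
 * do_scale_vcore() call for the zeroed member returns early.
 */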

static inline void enable_clock_domain(u32 const clkctrl_reg, u32 enable_mode)
{
        clrsetbits_le32(clkctrl_reg, CD_CLKCTRL_CLKTRCTRL_MASK,
                        enable_mode << CD_CLKCTRL_CLKTRCTRL_SHIFT);
        debug("Enable clock domain - %x\n", clkctrl_reg);
}

static inline void disable_clock_domain(u32 const clkctrl_reg)
{
        clrsetbits_le32(clkctrl_reg, CD_CLKCTRL_CLKTRCTRL_MASK,
                        CD_CLKCTRL_CLKTRCTRL_SW_SLEEP <<
                        CD_CLKCTRL_CLKTRCTRL_SHIFT);
        debug("Disable clock domain - %x\n", clkctrl_reg);
}

static inline void wait_for_clk_enable(u32 clkctrl_addr)
{
        u32 clkctrl, idlest = MODULE_CLKCTRL_IDLEST_DISABLED;
        u32 bound = LDELAY;

        while ((idlest == MODULE_CLKCTRL_IDLEST_DISABLED) ||
                (idlest == MODULE_CLKCTRL_IDLEST_TRANSITIONING)) {
                clkctrl = readl(clkctrl_addr);
                idlest = (clkctrl & MODULE_CLKCTRL_IDLEST_MASK) >>
                         MODULE_CLKCTRL_IDLEST_SHIFT;
                if (--bound == 0) {
                        printf("Clock enable failed for 0x%x idlest 0x%x\n",
                                clkctrl_addr, clkctrl);
                        return;
                }
        }
}

static inline void enable_clock_module(u32 const clkctrl_addr, u32 enable_mode,
                                u32 wait_for_enable)
{
        clrsetbits_le32(clkctrl_addr, MODULE_CLKCTRL_MODULEMODE_MASK,
                        enable_mode << MODULE_CLKCTRL_MODULEMODE_SHIFT);
        debug("Enable clock module - %x\n", clkctrl_addr);
        if (wait_for_enable)
                wait_for_clk_enable(clkctrl_addr);
}

static inline void wait_for_clk_disable(u32 clkctrl_addr)
{
        u32 clkctrl, idlest = MODULE_CLKCTRL_IDLEST_FULLY_FUNCTIONAL;
        u32 bound = LDELAY;

        while (idlest != MODULE_CLKCTRL_IDLEST_DISABLED) {
                clkctrl = readl(clkctrl_addr);
                idlest = (clkctrl & MODULE_CLKCTRL_IDLEST_MASK) >>
                         MODULE_CLKCTRL_IDLEST_SHIFT;
                if (--bound == 0) {
                        printf("Clock disable failed for 0x%x idlest 0x%x\n",
                               clkctrl_addr, clkctrl);
                        return;
                }
        }
}

static inline void disable_clock_module(u32 const clkctrl_addr,
                                        u32 wait_for_disable)
{
        clrsetbits_le32(clkctrl_addr, MODULE_CLKCTRL_MODULEMODE_MASK,
                        MODULE_CLKCTRL_MODULEMODE_SW_DISABLE <<
                        MODULE_CLKCTRL_MODULEMODE_SHIFT);
        debug("Disable clock module - %x\n", clkctrl_addr);
        if (wait_for_disable)
                wait_for_clk_disable(clkctrl_addr);
}

void freq_update_core(void)
{
        u32 freq_config1 = 0;
        const struct dpll_params *core_dpll_params;
        u32 omap_rev = omap_revision();

        core_dpll_params = get_core_dpll_params(*dplls_data);
        /* Put EMIF clock domain in sw wakeup mode */
        enable_clock_domain((*prcm)->cm_memif_clkstctrl,
                                CD_CLKCTRL_CLKTRCTRL_SW_WKUP);
        wait_for_clk_enable((*prcm)->cm_memif_emif_1_clkctrl);
        wait_for_clk_enable((*prcm)->cm_memif_emif_2_clkctrl);

        freq_config1 = SHADOW_FREQ_CONFIG1_FREQ_UPDATE_MASK |
            SHADOW_FREQ_CONFIG1_DLL_RESET_MASK;

        freq_config1 |= (DPLL_EN_LOCK << SHADOW_FREQ_CONFIG1_DPLL_EN_SHIFT) &
                                SHADOW_FREQ_CONFIG1_DPLL_EN_MASK;

        freq_config1 |= (core_dpll_params->m2 <<
                        SHADOW_FREQ_CONFIG1_M2_DIV_SHIFT) &
                        SHADOW_FREQ_CONFIG1_M2_DIV_MASK;

        writel(freq_config1, (*prcm)->cm_shadow_freq_config1);
        if (!wait_on_value(SHADOW_FREQ_CONFIG1_FREQ_UPDATE_MASK, 0,
                        (u32 *) (*prcm)->cm_shadow_freq_config1, LDELAY)) {
                puts("FREQ UPDATE procedure failed!!\n");
                hang();
        }

        /*
         * Putting EMIF in HW_AUTO is seen to be causing issues with
         * EMIF clocks and the master DLL. Keep EMIF in SW_WKUP
         * in OMAP5430 ES1.0 silicon
         */
        if (omap_rev != OMAP5430_ES1_0) {
                /* Put EMIF clock domain back in hw auto mode */
                enable_clock_domain((*prcm)->cm_memif_clkstctrl,
                                        CD_CLKCTRL_CLKTRCTRL_HW_AUTO);
                wait_for_clk_enable((*prcm)->cm_memif_emif_1_clkctrl);
                wait_for_clk_enable((*prcm)->cm_memif_emif_2_clkctrl);
        }
}

void bypass_dpll(u32 const base)
{
        do_bypass_dpll(base);
        wait_for_bypass(base);
}

void lock_dpll(u32 const base)
{
        do_lock_dpll(base);
        wait_for_lock(base);
}

void setup_clocks_for_console(void)
{
        /* Do not add any spl_debug prints in this function */
        clrsetbits_le32((*prcm)->cm_l4per_clkstctrl, CD_CLKCTRL_CLKTRCTRL_MASK,
                        CD_CLKCTRL_CLKTRCTRL_SW_WKUP <<
                        CD_CLKCTRL_CLKTRCTRL_SHIFT);

        /* Enable all UARTs - console will be on one of them */
        clrsetbits_le32((*prcm)->cm_l4per_uart1_clkctrl,
                        MODULE_CLKCTRL_MODULEMODE_MASK,
                        MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
                        MODULE_CLKCTRL_MODULEMODE_SHIFT);

        clrsetbits_le32((*prcm)->cm_l4per_uart2_clkctrl,
                        MODULE_CLKCTRL_MODULEMODE_MASK,
                        MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
                        MODULE_CLKCTRL_MODULEMODE_SHIFT);

        clrsetbits_le32((*prcm)->cm_l4per_uart3_clkctrl,
                        MODULE_CLKCTRL_MODULEMODE_MASK,
                        MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
                        MODULE_CLKCTRL_MODULEMODE_SHIFT);

        clrsetbits_le32((*prcm)->cm_l4per_uart4_clkctrl,
                        MODULE_CLKCTRL_MODULEMODE_MASK,
                        MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN <<
                        MODULE_CLKCTRL_MODULEMODE_SHIFT);

        clrsetbits_le32((*prcm)->cm_l4per_clkstctrl, CD_CLKCTRL_CLKTRCTRL_MASK,
                        CD_CLKCTRL_CLKTRCTRL_HW_AUTO <<
                        CD_CLKCTRL_CLKTRCTRL_SHIFT);
}

void do_enable_clocks(u32 const *clk_domains,
                            u32 const *clk_modules_hw_auto,
                            u32 const *clk_modules_explicit_en,
                            u8 wait_for_enable)
{
        u32 i, max = 100;

        /* Put the clock domains in SW_WKUP mode */
        for (i = 0; (i < max) && clk_domains[i]; i++) {
                enable_clock_domain(clk_domains[i],
                                    CD_CLKCTRL_CLKTRCTRL_SW_WKUP);
        }

        /* Clock modules that need to be put in HW_AUTO */
        for (i = 0; (i < max) && clk_modules_hw_auto[i]; i++) {
                enable_clock_module(clk_modules_hw_auto[i],
                                    MODULE_CLKCTRL_MODULEMODE_HW_AUTO,
                                    wait_for_enable);
        }

        /* Clock modules that need to be put in SW_EXPLICIT_EN mode */
        for (i = 0; (i < max) && clk_modules_explicit_en[i]; i++) {
                enable_clock_module(clk_modules_explicit_en[i],
                                    MODULE_CLKCTRL_MODULEMODE_SW_EXPLICIT_EN,
                                    wait_for_enable);
        }

        /* Put the clock domains in HW_AUTO mode now */
        for (i = 0; (i < max) && clk_domains[i]; i++) {
                enable_clock_domain(clk_domains[i],
                                    CD_CLKCTRL_CLKTRCTRL_HW_AUTO);
        }
}

void do_disable_clocks(u32 const *clk_domains,
                            u32 const *clk_modules_disable,
                            u8 wait_for_disable)
{
        u32 i, max = 100;

        /* Clock modules that need to be put in SW_DISABLE */
        for (i = 0; (i < max) && clk_modules_disable[i]; i++)
                disable_clock_module(clk_modules_disable[i],
                                     wait_for_disable);

        /* Put the clock domains in SW_SLEEP mode */
        for (i = 0; (i < max) && clk_domains[i]; i++)
                disable_clock_domain(clk_domains[i]);
}
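
/*
 * Usage sketch for the two helpers above (illustrative only; real boards
 * pass per-SoC arrays, typically defined in hw_data.c): the clock domain
 * and clock module lists are zero-terminated arrays of PRCM register
 * addresses, at most 100 entries each. The register names below are taken
 * from this file, the array contents are hypothetical, and the sketch
 * assumes the global prcm pointer has already been set up (normally done
 * in hw_data_init()).
 */
static inline void example_enable_console_uart_clocks(void)
{
        /* Clock domain(s) to wake up, then hand back to HW_AUTO */
        u32 const clk_domains[] = {
                (*prcm)->cm_l4per_clkstctrl,
                0
        };
        /* No modules in HW_AUTO mode in this example */
        u32 const clk_modules_hw_auto[] = {
                0
        };
        /* Modules to enable explicitly (SW_EXPLICIT_EN) */
        u32 const clk_modules_explicit_en[] = {
                (*prcm)->cm_l4per_uart3_clkctrl,
                0
        };

        do_enable_clocks(clk_domains, clk_modules_hw_auto,
                         clk_modules_explicit_en, 1);
}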

void prcm_init(void)
{
        switch (omap_hw_init_context()) {
        case OMAP_INIT_CONTEXT_SPL:
        case OMAP_INIT_CONTEXT_UBOOT_FROM_NOR:
        case OMAP_INIT_CONTEXT_UBOOT_AFTER_CH:
                enable_basic_clocks();
                timer_init();
                scale_vcores(*omap_vcores);
                setup_dplls();
                setup_warmreset_time();
                break;
        default:
                break;
        }

        if (OMAP_INIT_CONTEXT_SPL != omap_hw_init_context())
                enable_basic_uboot_clocks();
}

void gpi2c_init(void)
{
        static int gpi2c = 1;

        if (gpi2c) {
                i2c_init(CONFIG_SYS_OMAP24_I2C_SPEED,
                         CONFIG_SYS_OMAP24_I2C_SLAVE);
                gpi2c = 0;
        }
}