uboot/arch/arm/cpu/armv7/omap-common/emif-common.c
/*
 * EMIF programming
 *
 * (C) Copyright 2010
 * Texas Instruments, <www.ti.com>
 *
 * Aneesh V <aneesh@ti.com>
 *
 * SPDX-License-Identifier:     GPL-2.0+
 */

#include <common.h>
#include <asm/emif.h>
#include <asm/arch/clock.h>
#include <asm/arch/sys_proto.h>
#include <asm/omap_common.h>
#include <asm/utils.h>
#include <linux/compiler.h>

static int emif1_enabled = -1, emif2_enabled = -1;

void set_lpmode_selfrefresh(u32 base)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
        u32 reg;

        reg = readl(&emif->emif_pwr_mgmt_ctrl);
        reg &= ~EMIF_REG_LP_MODE_MASK;
        reg |= LP_MODE_SELF_REFRESH << EMIF_REG_LP_MODE_SHIFT;
        reg &= ~EMIF_REG_SR_TIM_MASK;
        writel(reg, &emif->emif_pwr_mgmt_ctrl);

        /* dummy read for the new SR_TIM to be loaded */
        readl(&emif->emif_pwr_mgmt_ctrl);
}

void force_emif_self_refresh(void)
{
        set_lpmode_selfrefresh(EMIF1_BASE);
        set_lpmode_selfrefresh(EMIF2_BASE);
}

inline u32 emif_num(u32 base)
{
        if (base == EMIF1_BASE)
                return 1;
        else if (base == EMIF2_BASE)
                return 2;
        else
                return 0;
}

static inline u32 get_mr(u32 base, u32 cs, u32 mr_addr)
{
        u32 mr;
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

        mr_addr |= cs << EMIF_REG_CS_SHIFT;
        writel(mr_addr, &emif->emif_lpddr2_mode_reg_cfg);
        if (omap_revision() == OMAP4430_ES2_0)
                mr = readl(&emif->emif_lpddr2_mode_reg_data_es2);
        else
                mr = readl(&emif->emif_lpddr2_mode_reg_data);
        debug("get_mr: EMIF%d cs %d mr %08x val 0x%x\n", emif_num(base),
              cs, mr_addr, mr);
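        /*
         * The device repeats the mode-register value on every byte lane.
         * If all four lanes agree, collapse the reading to a single 8-bit
         * value; otherwise return the raw 32-bit word so that callers
         * (e.g. is_lpddr2_sdram_present()) can detect the mismatch.
         */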
        if (((mr & 0x0000ff00) >>  8) == (mr & 0xff) &&
            ((mr & 0x00ff0000) >> 16) == (mr & 0xff) &&
            ((mr & 0xff000000) >> 24) == (mr & 0xff))
                return mr & 0xff;
        else
                return mr;
}

static inline void set_mr(u32 base, u32 cs, u32 mr_addr, u32 mr_val)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

        mr_addr |= cs << EMIF_REG_CS_SHIFT;
        writel(mr_addr, &emif->emif_lpddr2_mode_reg_cfg);
        writel(mr_val, &emif->emif_lpddr2_mode_reg_data);
}

void emif_reset_phy(u32 base)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
        u32 iodft;

        iodft = readl(&emif->emif_iodft_tlgc);
        iodft |= EMIF_REG_RESET_PHY_MASK;
        writel(iodft, &emif->emif_iodft_tlgc);
}

static void do_lpddr2_init(u32 base, u32 cs)
{
        u32 mr_addr;
        const struct lpddr2_mr_regs *mr_regs;

        get_lpddr2_mr_regs(&mr_regs);
        /* Wait till device auto initialization is complete */
        while (get_mr(base, cs, LPDDR2_MR0) & LPDDR2_MR0_DAI_MASK)
                ;
        set_mr(base, cs, LPDDR2_MR10, mr_regs->mr10);
        /*
         * tZQINIT = 1 us
         * Enough loops assuming a maximum of 2GHz
         */
        sdelay(2000);

        set_mr(base, cs, LPDDR2_MR1, mr_regs->mr1);
        set_mr(base, cs, LPDDR2_MR16, mr_regs->mr16);

        /*
         * Enable refresh along with writing MR2
         * Encoding of RL in MR2 is (RL - 2)
         */
        mr_addr = LPDDR2_MR2 | EMIF_REG_REFRESH_EN_MASK;
        set_mr(base, cs, mr_addr, mr_regs->mr2);

        if (mr_regs->mr3 > 0)
                set_mr(base, cs, LPDDR2_MR3, mr_regs->mr3);
}

static void lpddr2_init(u32 base, const struct emif_regs *regs)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

        /* Not NVM */
        clrbits_le32(&emif->emif_lpddr2_nvm_config, EMIF_REG_CS1NVMEN_MASK);

        /*
         * Keep REG_INITREF_DIS = 1 to prevent re-initialization of SDRAM
         * when EMIF_SDRAM_CONFIG register is written
         */
        setbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK);

        /*
         * Set the SDRAM_CONFIG and PHY_CTRL for the
         * un-locked frequency & default RL
         */
        writel(regs->sdram_config_init, &emif->emif_sdram_config);
        writel(regs->emif_ddr_phy_ctlr_1_init, &emif->emif_ddr_phy_ctrl_1);

        do_ext_phy_settings(base, regs);

        do_lpddr2_init(base, CS0);
        if (regs->sdram_config & EMIF_REG_EBANK_MASK)
                do_lpddr2_init(base, CS1);

        writel(regs->sdram_config, &emif->emif_sdram_config);
        writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1);

        /* Enable refresh now */
        clrbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK);
}

__weak void do_ext_phy_settings(u32 base, const struct emif_regs *regs)
{
}

void emif_update_timings(u32 base, const struct emif_regs *regs)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

        if (!is_dra7xx())
                writel(regs->ref_ctrl, &emif->emif_sdram_ref_ctrl_shdw);
        else
                writel(regs->ref_ctrl_final, &emif->emif_sdram_ref_ctrl_shdw);

        writel(regs->sdram_tim1, &emif->emif_sdram_tim_1_shdw);
        writel(regs->sdram_tim2, &emif->emif_sdram_tim_2_shdw);
        writel(regs->sdram_tim3, &emif->emif_sdram_tim_3_shdw);
        if (omap_revision() == OMAP4430_ES1_0) {
                /* ES1 bug: EMIF should be in force idle during freq_update */
                writel(0, &emif->emif_pwr_mgmt_ctrl);
        } else {
                writel(EMIF_PWR_MGMT_CTRL, &emif->emif_pwr_mgmt_ctrl);
                writel(EMIF_PWR_MGMT_CTRL_SHDW, &emif->emif_pwr_mgmt_ctrl_shdw);
        }
        writel(regs->read_idle_ctrl, &emif->emif_read_idlectrl_shdw);
        writel(regs->zq_config, &emif->emif_zq_config);
        writel(regs->temp_alert_config, &emif->emif_temp_alert_config);
        writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1_shdw);

        if ((omap_revision() >= OMAP5430_ES1_0) || is_dra7xx()) {
                writel(EMIF_L3_CONFIG_VAL_SYS_10_MPU_5_LL_0,
                        &emif->emif_l3_config);
        } else if (omap_revision() >= OMAP4460_ES1_0) {
                writel(EMIF_L3_CONFIG_VAL_SYS_10_MPU_3_LL_0,
                        &emif->emif_l3_config);
        } else {
                writel(EMIF_L3_CONFIG_VAL_SYS_10_LL_0,
                        &emif->emif_l3_config);
        }
}

#ifndef CONFIG_OMAP44XX
static void omap5_ddr3_leveling(u32 base, const struct emif_regs *regs)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

        /* keep sdram in self-refresh */
        writel(((LP_MODE_SELF_REFRESH << EMIF_REG_LP_MODE_SHIFT)
                & EMIF_REG_LP_MODE_MASK), &emif->emif_pwr_mgmt_ctrl);
        __udelay(130);

        /*
         * Set invert_clkout (if activated) - DDR_PHYCTRL_1
         * Invert clock adds an additional half cycle delay on the
         * command interface. The additional half cycle is usually
         * meant to enable leveling in the situation that DQS is later
         * than CK on the board. It also helps provide some additional
         * margin for leveling.
         */
        writel(regs->emif_ddr_phy_ctlr_1,
               &emif->emif_ddr_phy_ctrl_1);

        writel(regs->emif_ddr_phy_ctlr_1,
               &emif->emif_ddr_phy_ctrl_1_shdw);
        __udelay(130);

        writel(((LP_MODE_DISABLE << EMIF_REG_LP_MODE_SHIFT)
               & EMIF_REG_LP_MODE_MASK), &emif->emif_pwr_mgmt_ctrl);

        /* Launch full leveling */
        writel(DDR3_FULL_LVL, &emif->emif_rd_wr_lvl_ctl);

        /* Wait till full leveling is complete */
        readl(&emif->emif_rd_wr_lvl_ctl);
        __udelay(130);

        /* Read data eye leveling no of samples */
        config_data_eye_leveling_samples(base);

        /*
         * Launch 8 incremental WR_LVL (to compensate for
         * a PHY limitation).
         */
        writel(0x2 << EMIF_REG_WRLVLINC_INT_SHIFT,
               &emif->emif_rd_wr_lvl_ctl);
        __udelay(130);

        /* Launch incremental leveling */
        writel(DDR3_INC_LVL, &emif->emif_rd_wr_lvl_ctl);
        __udelay(130);
}

static void update_hwleveling_output(u32 base, const struct emif_regs *regs)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
        u32 *emif_ext_phy_ctrl_reg, *emif_phy_status;
        u32 reg, i, phy;

        emif_phy_status = (u32 *)&emif->emif_ddr_phy_status[7];
        phy = readl(&emif->emif_ddr_phy_ctrl_1);

        /* Update PHY_REG_RDDQS_RATIO */
        emif_ext_phy_ctrl_reg = (u32 *)&emif->emif_ddr_ext_phy_ctrl_7;
        if (!(phy & EMIF_DDR_PHY_CTRL_1_RDLVL_MASK_MASK))
                for (i = 0; i < PHY_RDDQS_RATIO_REGS; i++) {
                        reg = readl(emif_phy_status++);
                        writel(reg, emif_ext_phy_ctrl_reg++);
                        writel(reg, emif_ext_phy_ctrl_reg++);
                }

        /* Update PHY_REG_FIFO_WE_SLAVE_RATIO */
        emif_ext_phy_ctrl_reg = (u32 *)&emif->emif_ddr_ext_phy_ctrl_2;
        emif_phy_status = (u32 *)&emif->emif_ddr_phy_status[12];
        if (!(phy & EMIF_DDR_PHY_CTRL_1_RDLVLGATE_MASK_MASK))
                for (i = 0; i < PHY_FIFO_WE_SLAVE_RATIO_REGS; i++) {
                        reg = readl(emif_phy_status++);
                        writel(reg, emif_ext_phy_ctrl_reg++);
                        writel(reg, emif_ext_phy_ctrl_reg++);
                }

        /* Update PHY_REG_WR_DQ/DQS_SLAVE_RATIO */
        emif_ext_phy_ctrl_reg = (u32 *)&emif->emif_ddr_ext_phy_ctrl_12;
        emif_phy_status = (u32 *)&emif->emif_ddr_phy_status[17];
        if (!(phy & EMIF_DDR_PHY_CTRL_1_WRLVL_MASK_MASK))
                for (i = 0; i < PHY_REG_WR_DQ_SLAVE_RATIO_REGS; i++) {
                        reg = readl(emif_phy_status++);
                        writel(reg, emif_ext_phy_ctrl_reg++);
                        writel(reg, emif_ext_phy_ctrl_reg++);
                }

        /* Disable Leveling */
        writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1);
        writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1_shdw);
        writel(0x0, &emif->emif_rd_wr_lvl_rmp_ctl);
}
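
/*
 * Note on the double writel() in each loop above: the EXT_PHY_CTRL
 * registers are laid out as pairs (the register immediately followed by
 * its shadow), so each leveled ratio read back from the PHY status
 * registers is written to both members of the pair; hence the two
 * post-incrementing writes per value.
 */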

static void dra7_ddr3_leveling(u32 base, const struct emif_regs *regs)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

        /* Clear Error Status */
        clrsetbits_le32(&emif->emif_ddr_ext_phy_ctrl_36,
                        EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR,
                        EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR);

        clrsetbits_le32(&emif->emif_ddr_ext_phy_ctrl_36_shdw,
                        EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR,
                        EMIF_REG_PHY_FIFO_WE_IN_MISALINED_CLR);

        /* Disable refreshes before leveling */
        clrsetbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK,
                        EMIF_REG_INITREF_DIS_MASK);

        /* Start full leveling */
        writel(DDR3_FULL_LVL, &emif->emif_rd_wr_lvl_ctl);

        __udelay(300);

        /* Check for leveling timeout */
        if (readl(&emif->emif_status) & EMIF_REG_LEVELING_TO_MASK) {
                printf("Leveling timeout on EMIF%d\n", emif_num(base));
                return;
        }

        /* Enable refreshes after leveling */
        clrbits_le32(&emif->emif_sdram_ref_ctrl, EMIF_REG_INITREF_DIS_MASK);

        debug("HW leveling success\n");
        /*
         * Update slave ratios in EXT_PHY_CTRLx registers
         * as per HW leveling output
         */
        update_hwleveling_output(base, regs);
}

static void dra7_ddr3_init(u32 base, const struct emif_regs *regs)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

        if (warm_reset()) {
                emif_reset_phy(base);
                writel(0x0, &emif->emif_pwr_mgmt_ctrl);
        }
        do_ext_phy_settings(base, regs);

        writel(regs->ref_ctrl | EMIF_REG_INITREF_DIS_MASK,
               &emif->emif_sdram_ref_ctrl);
        /* Update timing registers */
        writel(regs->sdram_tim1, &emif->emif_sdram_tim_1);
        writel(regs->sdram_tim2, &emif->emif_sdram_tim_2);
        writel(regs->sdram_tim3, &emif->emif_sdram_tim_3);

        writel(EMIF_L3_CONFIG_VAL_SYS_10_MPU_5_LL_0, &emif->emif_l3_config);
        writel(regs->read_idle_ctrl, &emif->emif_read_idlectrl);
        writel(regs->zq_config, &emif->emif_zq_config);
        writel(regs->temp_alert_config, &emif->emif_temp_alert_config);
        writel(regs->emif_rd_wr_lvl_rmp_ctl, &emif->emif_rd_wr_lvl_rmp_ctl);
        writel(regs->emif_rd_wr_lvl_ctl, &emif->emif_rd_wr_lvl_ctl);

        writel(regs->emif_ddr_phy_ctlr_1_init, &emif->emif_ddr_phy_ctrl_1);
        writel(regs->emif_rd_wr_exec_thresh, &emif->emif_rd_wr_exec_thresh);

        writel(regs->ref_ctrl, &emif->emif_sdram_ref_ctrl);

        writel(regs->sdram_config2, &emif->emif_lpddr2_nvm_config);
        writel(regs->sdram_config_init, &emif->emif_sdram_config);

        __udelay(1000);

        writel(regs->ref_ctrl_final, &emif->emif_sdram_ref_ctrl);

        if (regs->emif_rd_wr_lvl_rmp_ctl & EMIF_REG_RDWRLVL_EN_MASK)
                dra7_ddr3_leveling(base, regs);
}

static void omap5_ddr3_init(u32 base, const struct emif_regs *regs)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

        writel(regs->ref_ctrl, &emif->emif_sdram_ref_ctrl);
        writel(regs->sdram_config_init, &emif->emif_sdram_config);
        /*
         * Set SDRAM_CONFIG and PHY control registers to locked frequency
         * and RL = 7. As the default values of the Mode Registers are not
         * defined, the contents of the Mode Registers must be fully
         * initialized. H/W takes care of this initialization
         */
        writel(regs->emif_ddr_phy_ctlr_1_init, &emif->emif_ddr_phy_ctrl_1);

        /* Update timing registers */
        writel(regs->sdram_tim1, &emif->emif_sdram_tim_1);
        writel(regs->sdram_tim2, &emif->emif_sdram_tim_2);
        writel(regs->sdram_tim3, &emif->emif_sdram_tim_3);

        writel(regs->read_idle_ctrl, &emif->emif_read_idlectrl);

        writel(regs->sdram_config2, &emif->emif_lpddr2_nvm_config);
        writel(regs->sdram_config_init, &emif->emif_sdram_config);
        do_ext_phy_settings(base, regs);

        writel(regs->emif_rd_wr_lvl_rmp_ctl, &emif->emif_rd_wr_lvl_rmp_ctl);
        omap5_ddr3_leveling(base, regs);
}

static void ddr3_init(u32 base, const struct emif_regs *regs)
{
        if (is_omap54xx())
                omap5_ddr3_init(base, regs);
        else
                dra7_ddr3_init(base, regs);
}
#endif

#ifndef CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS
#define print_timing_reg(reg) debug(#reg" - 0x%08x\n", (reg))

/*
 * Numerator/denominator of the DDR clock period in ns, used by the
 * ns_2_cycles() helpers below. Kept in SRAM scratch space (addresses from
 * <asm/omap_common.h>) because this code can run before writable data
 * sections are available.
 */
static u32 *const T_num = (u32 *)OMAP_SRAM_SCRATCH_EMIF_T_NUM;
static u32 *const T_den = (u32 *)OMAP_SRAM_SCRATCH_EMIF_T_DEN;

/*
 * Organization and refresh requirements for LPDDR2 devices of different
 * types and densities. Derived from JESD209-2 section 2.4
 */
const struct lpddr2_addressing addressing_table[] = {
        /* Banks tREFIx10     rowx32,rowx16      colx32,colx16  density */
        {BANKS4, T_REFI_15_6, {ROW_12, ROW_12}, {COL_7, COL_8} },/*64M */
        {BANKS4, T_REFI_15_6, {ROW_12, ROW_12}, {COL_8, COL_9} },/*128M */
        {BANKS4, T_REFI_7_8, {ROW_13, ROW_13}, {COL_8, COL_9} },/*256M */
        {BANKS4, T_REFI_7_8, {ROW_13, ROW_13}, {COL_9, COL_10} },/*512M */
        {BANKS8, T_REFI_7_8, {ROW_13, ROW_13}, {COL_9, COL_10} },/*1GS4 */
        {BANKS8, T_REFI_3_9, {ROW_14, ROW_14}, {COL_9, COL_10} },/*2GS4 */
        {BANKS8, T_REFI_3_9, {ROW_14, ROW_14}, {COL_10, COL_11} },/*4G */
        {BANKS8, T_REFI_3_9, {ROW_15, ROW_15}, {COL_10, COL_11} },/*8G */
        {BANKS4, T_REFI_7_8, {ROW_14, ROW_14}, {COL_9, COL_10} },/*1GS2 */
        {BANKS4, T_REFI_3_9, {ROW_15, ROW_15}, {COL_9, COL_10} },/*2GS2 */
};

static const u32 lpddr2_density_2_size_in_mbytes[] = {
        8,                      /* 64Mb */
        16,                     /* 128Mb */
        32,                     /* 256Mb */
        64,                     /* 512Mb */
        128,                    /* 1Gb   */
        256,                    /* 2Gb   */
        512,                    /* 4Gb   */
        1024,                   /* 8Gb   */
        2048,                   /* 16Gb  */
        4096                    /* 32Gb  */
};

/*
 * Calculate the period of DDR clock from frequency value and set the
 * denominator and numerator in global variables for easy access later
 */
static void set_ddr_clk_period(u32 freq)
{
        /*
         * period = 1/freq
         * period_in_ns = 10^9/freq
         */
        *T_num = 1000000000;
        *T_den = freq;
        cancel_out(T_num, T_den, 200);
}
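
/*
 * Worked example: for freq = 400 MHz the period is 10^9 / (400 * 10^6) =
 * 2.5 ns. It is carried as the fraction T_num/T_den rather than a rounded
 * integer so the cycle conversions below lose no precision.
 */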

/*
 * Convert time in nano seconds to number of cycles of DDR clock
 */
static inline u32 ns_2_cycles(u32 ns)
{
        return ((ns * (*T_den)) + (*T_num) - 1) / (*T_num);
}
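
/*
 * e.g. with a 2.5 ns period (say T_num = 5, T_den = 2):
 * ns_2_cycles(15) = (15 * 2 + 5 - 1) / 5 = 6, i.e. ceil(15 / 2.5) cycles.
 */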

/*
 * ns_2_cycles with the difference that the time passed is 2 times the actual
 * value (to avoid fractions). The cycles returned is for the original value
 * of the timing parameter
 */
static inline u32 ns_x2_2_cycles(u32 ns)
{
        return ((ns * (*T_den)) + (*T_num) * 2 - 1) / ((*T_num) * 2);
}
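
/*
 * e.g. a 7.5 ns parameter is passed in as 15; with T_num = 5, T_den = 2:
 * ns_x2_2_cycles(15) = (15 * 2 + 10 - 1) / 10 = 3, i.e. ceil(7.5 / 2.5).
 */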

/*
 * Find addressing table index based on the device's type (S2 or S4) and
 * density
 */
s8 addressing_table_index(u8 type, u8 density, u8 width)
{
        u8 index;

        if ((density > LPDDR2_DENSITY_8Gb) || (width == LPDDR2_IO_WIDTH_8))
                return -1;

        /*
         * Look at the way ADDR_TABLE_INDEX* values have been defined
         * in emif.h compared to LPDDR2_DENSITY_* values
         * The table is laid out in the increasing order of density
         * (ignoring type). The exceptions 1GS2 and 2GS2 have been placed
         * at the end
         */
        if ((type == LPDDR2_TYPE_S2) && (density == LPDDR2_DENSITY_1Gb))
                index = ADDR_TABLE_INDEX1GS2;
        else if ((type == LPDDR2_TYPE_S2) && (density == LPDDR2_DENSITY_2Gb))
                index = ADDR_TABLE_INDEX2GS2;
        else
                index = density;

        debug("emif: addressing table index %d\n", index);

        return index;
}

/*
 * Find the right timing table from the array of timing
 * tables of the device using DDR clock frequency
 */
static const struct lpddr2_ac_timings *get_timings_table(
                const struct lpddr2_ac_timings * const *device_timings,
                u32 freq)
{
        u32 i, temp, freq_nearest;
        const struct lpddr2_ac_timings *timings = NULL;

        emif_assert(freq <= MAX_LPDDR2_FREQ);
        emif_assert(device_timings);

        /*
         * Start with the maximum allowed frequency - that is always safe
         */
        freq_nearest = MAX_LPDDR2_FREQ;
        /*
         * Find the timings table that has the max frequency value:
         *   i.  Above or equal to the DDR frequency - safe
         *   ii. The lowest that satisfies condition (i) - optimal
         */
        for (i = 0; (i < MAX_NUM_SPEEDBINS) && device_timings[i]; i++) {
                temp = device_timings[i]->max_freq;
                if ((temp >= freq) && (temp <= freq_nearest)) {
                        freq_nearest = temp;
                        timings = device_timings[i];
                }
        }
        debug("emif: timings table: %d\n", freq_nearest);
        return timings;
}

/*
 * Finds the value of emif_sdram_config_reg
 * All parameters are programmed based on the device on CS0.
 * If there is a device on CS1, it will be the same as that on CS0 or
 * it will be NVM. We don't support NVM yet.
 * If the cs1_device pointer is NULL it is assumed that there is no device
 * on CS1
 */
static u32 get_sdram_config_reg(const struct lpddr2_device_details *cs0_device,
                                const struct lpddr2_device_details *cs1_device,
                                const struct lpddr2_addressing *addressing,
                                u8 RL)
{
        u32 config_reg = 0;

        config_reg |=  (cs0_device->type + 4) << EMIF_REG_SDRAM_TYPE_SHIFT;
        config_reg |=  EMIF_INTERLEAVING_POLICY_MAX_INTERLEAVING <<
                        EMIF_REG_IBANK_POS_SHIFT;

        config_reg |= cs0_device->io_width << EMIF_REG_NARROW_MODE_SHIFT;

        config_reg |= RL << EMIF_REG_CL_SHIFT;

        config_reg |= addressing->row_sz[cs0_device->io_width] <<
                        EMIF_REG_ROWSIZE_SHIFT;

        config_reg |= addressing->num_banks << EMIF_REG_IBANK_SHIFT;

        config_reg |= (cs1_device ? EBANK_CS1_EN : EBANK_CS1_DIS) <<
                        EMIF_REG_EBANK_SHIFT;

        config_reg |= addressing->col_sz[cs0_device->io_width] <<
                        EMIF_REG_PAGESIZE_SHIFT;

        return config_reg;
}
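
/*
 * Note on the "+ 4" above: MR8 encodes LPDDR2-S4 as 0 and LPDDR2-S2 as 1,
 * while the EMIF SDRAM_TYPE field uses 4 for LPDDR2-S4 and 5 for LPDDR2-S2,
 * so the MR8 type code is offset by 4 when building the config register.
 */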

static u32 get_sdram_ref_ctrl(u32 freq,
                              const struct lpddr2_addressing *addressing)
{
        u32 ref_ctrl = 0, val = 0, freq_khz;

        freq_khz = freq / 1000;
        /*
         * The refresh rate to program is tREFI * freq-in-MHz. Divide by
         * 10000 to account for the kHz units of freq_khz and the x10
         * factor in t_REFI_us_x10
         */
        val = addressing->t_REFI_us_x10 * freq_khz / 10000;
        ref_ctrl |= val << EMIF_REG_REFRESH_RATE_SHIFT;

        return ref_ctrl;
}
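
/*
 * e.g. tREFI = 7.8 us (t_REFI_us_x10 = 78) at 400 MHz:
 * 78 * 400000 / 10000 = 3120 DDR clock cycles between refresh commands.
 */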

static u32 get_sdram_tim_1_reg(const struct lpddr2_ac_timings *timings,
                               const struct lpddr2_min_tck *min_tck,
                               const struct lpddr2_addressing *addressing)
{
        u32 tim1 = 0, val = 0;

        val = max(min_tck->tWTR, ns_x2_2_cycles(timings->tWTRx2)) - 1;
        tim1 |= val << EMIF_REG_T_WTR_SHIFT;

        if (addressing->num_banks == BANKS8)
                val = (timings->tFAW * (*T_den) + 4 * (*T_num) - 1) /
                                                        (4 * (*T_num)) - 1;
        else
                val = max(min_tck->tRRD, ns_2_cycles(timings->tRRD)) - 1;

        tim1 |= val << EMIF_REG_T_RRD_SHIFT;

        val = ns_2_cycles(timings->tRASmin + timings->tRPab) - 1;
        tim1 |= val << EMIF_REG_T_RC_SHIFT;

        val = max(min_tck->tRAS_MIN, ns_2_cycles(timings->tRASmin)) - 1;
        tim1 |= val << EMIF_REG_T_RAS_SHIFT;

        val = max(min_tck->tWR, ns_2_cycles(timings->tWR)) - 1;
        tim1 |= val << EMIF_REG_T_WR_SHIFT;

        val = max(min_tck->tRCD, ns_2_cycles(timings->tRCD)) - 1;
        tim1 |= val << EMIF_REG_T_RCD_SHIFT;

        val = max(min_tck->tRP_AB, ns_2_cycles(timings->tRPab)) - 1;
        tim1 |= val << EMIF_REG_T_RP_SHIFT;

        return tim1;
}
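
/*
 * Note the recurring pattern above: each parameter is converted to cycles,
 * clamped to its minimum-tCK floor where one exists, and programmed as
 * (cycles - 1), since the EMIF timing fields encode N cycles as N - 1.
 * For 8-bank parts tRRD is derived from tFAW as ceil(tFAW / 4) cycles.
 */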

static u32 get_sdram_tim_2_reg(const struct lpddr2_ac_timings *timings,
                               const struct lpddr2_min_tck *min_tck)
{
        u32 tim2 = 0, val = 0;

        val = max(min_tck->tCKE, timings->tCKE) - 1;
        tim2 |= val << EMIF_REG_T_CKE_SHIFT;

        val = max(min_tck->tRTP, ns_x2_2_cycles(timings->tRTPx2)) - 1;
        tim2 |= val << EMIF_REG_T_RTP_SHIFT;

        /*
         * tXSRD = tRFCab + 10 ns. XSRD and XSNR should have the
         * same value
         */
        val = ns_2_cycles(timings->tXSR) - 1;
        tim2 |= val << EMIF_REG_T_XSRD_SHIFT;
        tim2 |= val << EMIF_REG_T_XSNR_SHIFT;

        val = max(min_tck->tXP, ns_x2_2_cycles(timings->tXPx2)) - 1;
        tim2 |= val << EMIF_REG_T_XP_SHIFT;

        return tim2;
}

static u32 get_sdram_tim_3_reg(const struct lpddr2_ac_timings *timings,
                               const struct lpddr2_min_tck *min_tck,
                               const struct lpddr2_addressing *addressing)
{
        u32 tim3 = 0, val = 0;

        val = min(timings->tRASmax * 10 / addressing->t_REFI_us_x10 - 1, 0xF);
        tim3 |= val << EMIF_REG_T_RAS_MAX_SHIFT;

        val = ns_2_cycles(timings->tRFCab) - 1;
        tim3 |= val << EMIF_REG_T_RFC_SHIFT;

        val = ns_x2_2_cycles(timings->tDQSCKMAXx2) - 1;
        tim3 |= val << EMIF_REG_T_TDQSCKMAX_SHIFT;

        val = ns_2_cycles(timings->tZQCS) - 1;
        tim3 |= val << EMIF_REG_ZQ_ZQCS_SHIFT;

        val = max(min_tck->tCKESR, ns_2_cycles(timings->tCKESR)) - 1;
        tim3 |= val << EMIF_REG_T_CKESR_SHIFT;

        return tim3;
}

static u32 get_zq_config_reg(const struct lpddr2_device_details *cs1_device,
                             const struct lpddr2_addressing *addressing,
                             u8 volt_ramp)
{
        u32 zq = 0, val = 0;

        if (volt_ramp)
                val =
                    EMIF_ZQCS_INTERVAL_DVFS_IN_US * 10 /
                    addressing->t_REFI_us_x10;
        else
                val =
                    EMIF_ZQCS_INTERVAL_NORMAL_IN_US * 10 /
                    addressing->t_REFI_us_x10;
        zq |= val << EMIF_REG_ZQ_REFINTERVAL_SHIFT;

        zq |= (REG_ZQ_ZQCL_MULT - 1) << EMIF_REG_ZQ_ZQCL_MULT_SHIFT;

        zq |= (REG_ZQ_ZQINIT_MULT - 1) << EMIF_REG_ZQ_ZQINIT_MULT_SHIFT;

        zq |= REG_ZQ_SFEXITEN_ENABLE << EMIF_REG_ZQ_SFEXITEN_SHIFT;

        /*
         * Assume the two chip-selects share a single calibration resistor.
         * If there are indeed two calibration resistors, this flag should
         * be enabled to take advantage of the dual calibration feature.
         * This data should ideally come from board files, but considering
         * that none of the boards today have calibration resistors per CS,
         * it would be an unnecessary overhead.
         */
        zq |= REG_ZQ_DUALCALEN_DISABLE << EMIF_REG_ZQ_DUALCALEN_SHIFT;

        zq |= REG_ZQ_CS0EN_ENABLE << EMIF_REG_ZQ_CS0EN_SHIFT;

        zq |= (cs1_device ? 1 : 0) << EMIF_REG_ZQ_CS1EN_SHIFT;

        return zq;
}
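
/*
 * The ZQ_REFINTERVAL field above is expressed in refresh periods:
 * interval-in-us divided by tREFI-in-us. e.g. assuming a 256 ms normal
 * ZQCS interval (EMIF_ZQCS_INTERVAL_NORMAL_IN_US = 256000) and
 * tREFI = 7.8 us: 256000 * 10 / 78 = 32820 refresh periods between
 * ZQCS commands.
 */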

static u32 get_temp_alert_config(const struct lpddr2_device_details *cs1_device,
                                 const struct lpddr2_addressing *addressing,
                                 u8 is_derated)
{
        u32 alert = 0, interval;

        interval =
            TEMP_ALERT_POLL_INTERVAL_MS * 10000 / addressing->t_REFI_us_x10;
        if (is_derated)
                interval *= 4;
        alert |= interval << EMIF_REG_TA_REFINTERVAL_SHIFT;

        alert |= TEMP_ALERT_CONFIG_DEVCT_1 << EMIF_REG_TA_DEVCNT_SHIFT;

        alert |= TEMP_ALERT_CONFIG_DEVWDT_32 << EMIF_REG_TA_DEVWDT_SHIFT;

        alert |= 1 << EMIF_REG_TA_SFEXITEN_SHIFT;

        alert |= 1 << EMIF_REG_TA_CS0EN_SHIFT;

        alert |= (cs1_device ? 1 : 0) << EMIF_REG_TA_CS1EN_SHIFT;

        return alert;
}

static u32 get_read_idle_ctrl_reg(u8 volt_ramp)
{
        u32 idle = 0, val = 0;

        if (volt_ramp)
                val = ns_2_cycles(READ_IDLE_INTERVAL_DVFS) / 64 - 1;
        else
                /* Maximum value in normal conditions - suggested by hw team */
                val = 0x1FF;
        idle |= val << EMIF_REG_READ_IDLE_INTERVAL_SHIFT;

        idle |= EMIF_REG_READ_IDLE_LEN_VAL << EMIF_REG_READ_IDLE_LEN_SHIFT;

        return idle;
}

static u32 get_ddr_phy_ctrl_1(u32 freq, u8 RL)
{
        u32 phy = 0, val = 0;

        phy |= (RL + 2) << EMIF_REG_READ_LATENCY_SHIFT;

        if (freq <= 100000000)
                val = EMIF_DLL_SLAVE_DLY_CTRL_100_MHZ_AND_LESS;
        else if (freq <= 200000000)
                val = EMIF_DLL_SLAVE_DLY_CTRL_200_MHZ;
        else
                val = EMIF_DLL_SLAVE_DLY_CTRL_400_MHZ;
        phy |= val << EMIF_REG_DLL_SLAVE_DLY_CTRL_SHIFT;

        /* Other fields are constant magic values. Hardcode them together */
        phy |= EMIF_DDR_PHY_CTRL_1_BASE_VAL <<
                EMIF_EMIF_DDR_PHY_CTRL_1_BASE_VAL_SHIFT;

        return phy;
}

static u32 get_emif_mem_size(u32 base)
{
        u32 size_mbytes = 0, temp;
        struct emif_device_details dev_details;
        struct lpddr2_device_details cs0_dev_details, cs1_dev_details;
        u32 emif_nr = emif_num(base);

        emif_reset_phy(base);
        dev_details.cs0_device_details = emif_get_device_details(emif_nr, CS0,
                                                &cs0_dev_details);
        dev_details.cs1_device_details = emif_get_device_details(emif_nr, CS1,
                                                &cs1_dev_details);
        emif_reset_phy(base);

        if (dev_details.cs0_device_details) {
                temp = dev_details.cs0_device_details->density;
                size_mbytes += lpddr2_density_2_size_in_mbytes[temp];
        }

        if (dev_details.cs1_device_details) {
                temp = dev_details.cs1_device_details->density;
                size_mbytes += lpddr2_density_2_size_in_mbytes[temp];
        }
        /* convert to bytes */
        return size_mbytes << 20;
}

/* Gets the encoding corresponding to a given DMM section size */
u32 get_dmm_section_size_map(u32 section_size)
{
        /*
         * Section size mapping:
         * 0x0: 16-MiB section
         * 0x1: 32-MiB section
         * 0x2: 64-MiB section
         * 0x3: 128-MiB section
         * 0x4: 256-MiB section
         * 0x5: 512-MiB section
         * 0x6: 1-GiB section
         * 0x7: 2-GiB section
         */
        section_size >>= 24; /* divide by 16 MiB */
        return log_2_n_round_down(section_size);
}
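
/*
 * e.g. section_size = 0x20000000 (512 MiB): 0x20000000 >> 24 = 32 and
 * log2(32) = 5, matching the 0x5 encoding for a 512-MiB section above.
 */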

static void emif_calculate_regs(
                const struct emif_device_details *emif_dev_details,
                u32 freq, struct emif_regs *regs)
{
        s32 temp;
        u32 sys_freq;
        const struct lpddr2_addressing *addressing;
        const struct lpddr2_ac_timings *timings;
        const struct lpddr2_min_tck *min_tck;
        const struct lpddr2_device_details *cs0_dev_details =
                                        emif_dev_details->cs0_device_details;
        const struct lpddr2_device_details *cs1_dev_details =
                                        emif_dev_details->cs1_device_details;
        const struct lpddr2_device_timings *cs0_dev_timings =
                                        emif_dev_details->cs0_device_timings;

        emif_assert(emif_dev_details);
        emif_assert(regs);
        /*
         * You cannot have a device on CS1 without one on CS0.
         * So configuring EMIF without a device on CS0 doesn't
         * make sense
         */
        emif_assert(cs0_dev_details);
        emif_assert(cs0_dev_details->type != LPDDR2_TYPE_NVM);
        /*
         * If there is a device on CS1 it should be the same type as CS0
         * (or NVM. But NVM is not supported in this driver yet)
         */
        emif_assert((cs1_dev_details == NULL) ||
                    (cs1_dev_details->type == LPDDR2_TYPE_NVM) ||
                    (cs0_dev_details->type == cs1_dev_details->type));
        emif_assert(freq <= MAX_LPDDR2_FREQ);

        set_ddr_clk_period(freq);

        /*
         * The device on CS0 is used for all timing calculations
         * There is only one set of registers for timings per EMIF. So, if the
         * second CS (CS1) has a device, it should have the same timings as the
         * device on CS0
         */
        timings = get_timings_table(cs0_dev_timings->ac_timings, freq);
        emif_assert(timings);
        min_tck = cs0_dev_timings->min_tck;

        /* temp is signed so an invalid (-1) index can be detected */
        temp = addressing_table_index(cs0_dev_details->type,
                                      cs0_dev_details->density,
                                      cs0_dev_details->io_width);
        emif_assert(temp >= 0);
        addressing = &addressing_table[temp];
        emif_assert(addressing);

        sys_freq = get_sys_clk_freq();

        regs->sdram_config_init = get_sdram_config_reg(cs0_dev_details,
                                                        cs1_dev_details,
                                                        addressing, RL_BOOT);

        regs->sdram_config = get_sdram_config_reg(cs0_dev_details,
                                                cs1_dev_details,
                                                addressing, RL_FINAL);

        regs->ref_ctrl = get_sdram_ref_ctrl(freq, addressing);

        regs->sdram_tim1 = get_sdram_tim_1_reg(timings, min_tck, addressing);

        regs->sdram_tim2 = get_sdram_tim_2_reg(timings, min_tck);

        regs->sdram_tim3 = get_sdram_tim_3_reg(timings, min_tck, addressing);

        regs->read_idle_ctrl = get_read_idle_ctrl_reg(LPDDR2_VOLTAGE_STABLE);

        regs->temp_alert_config =
            get_temp_alert_config(cs1_dev_details, addressing, 0);

        regs->zq_config = get_zq_config_reg(cs1_dev_details, addressing,
                                            LPDDR2_VOLTAGE_STABLE);

        regs->emif_ddr_phy_ctlr_1_init =
                        get_ddr_phy_ctrl_1(sys_freq / 2, RL_BOOT);

        regs->emif_ddr_phy_ctlr_1 =
                        get_ddr_phy_ctrl_1(freq, RL_FINAL);

        regs->freq = freq;

        print_timing_reg(regs->sdram_config_init);
        print_timing_reg(regs->sdram_config);
        print_timing_reg(regs->ref_ctrl);
        print_timing_reg(regs->sdram_tim1);
        print_timing_reg(regs->sdram_tim2);
        print_timing_reg(regs->sdram_tim3);
        print_timing_reg(regs->read_idle_ctrl);
        print_timing_reg(regs->temp_alert_config);
        print_timing_reg(regs->zq_config);
        print_timing_reg(regs->emif_ddr_phy_ctlr_1);
        print_timing_reg(regs->emif_ddr_phy_ctlr_1_init);
}
#endif /* CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS */

#ifdef CONFIG_SYS_AUTOMATIC_SDRAM_DETECTION
const char *get_lpddr2_type(u8 type_id)
{
        switch (type_id) {
        case LPDDR2_TYPE_S4:
                return "LPDDR2-S4";
        case LPDDR2_TYPE_S2:
                return "LPDDR2-S2";
        default:
                return NULL;
        }
}

const char *get_lpddr2_io_width(u8 width_id)
{
        switch (width_id) {
        case LPDDR2_IO_WIDTH_8:
                return "x8";
        case LPDDR2_IO_WIDTH_16:
                return "x16";
        case LPDDR2_IO_WIDTH_32:
                return "x32";
        default:
                return NULL;
        }
}

const char *get_lpddr2_manufacturer(u32 manufacturer)
{
        switch (manufacturer) {
        case LPDDR2_MANUFACTURER_SAMSUNG:
                return "Samsung";
        case LPDDR2_MANUFACTURER_QIMONDA:
                return "Qimonda";
        case LPDDR2_MANUFACTURER_ELPIDA:
                return "Elpida";
        case LPDDR2_MANUFACTURER_ETRON:
                return "Etron";
        case LPDDR2_MANUFACTURER_NANYA:
                return "Nanya";
        case LPDDR2_MANUFACTURER_HYNIX:
                return "Hynix";
        case LPDDR2_MANUFACTURER_MOSEL:
                return "Mosel";
        case LPDDR2_MANUFACTURER_WINBOND:
                return "Winbond";
        case LPDDR2_MANUFACTURER_ESMT:
                return "ESMT";
        case LPDDR2_MANUFACTURER_SPANSION:
                return "Spansion";
        case LPDDR2_MANUFACTURER_SST:
                return "SST";
        case LPDDR2_MANUFACTURER_ZMOS:
                return "ZMOS";
        case LPDDR2_MANUFACTURER_INTEL:
                return "Intel";
        case LPDDR2_MANUFACTURER_NUMONYX:
                return "Numonyx";
        case LPDDR2_MANUFACTURER_MICRON:
                return "Micron";
        default:
                return NULL;
        }
}

static void display_sdram_details(u32 emif_nr, u32 cs,
                                  struct lpddr2_device_details *device)
{
        const char *mfg_str;
        const char *type_str;
        char density_str[10];
        u32 density;

        debug("EMIF%d CS%d\t", emif_nr, cs);

        if (!device) {
                debug("None\n");
                return;
        }

        mfg_str = get_lpddr2_manufacturer(device->manufacturer);
        type_str = get_lpddr2_type(device->type);

        density = lpddr2_density_2_size_in_mbytes[device->density];
        if ((density / 1024 * 1024) == density) {
                density /= 1024;
                sprintf(density_str, "%d GB", density);
        } else {
                sprintf(density_str, "%d MB", density);
        }
        if (mfg_str && type_str)
                debug("%s\t\t%s\t%s\n", mfg_str, type_str, density_str);
}

static u8 is_lpddr2_sdram_present(u32 base, u32 cs,
                                  struct lpddr2_device_details *lpddr2_device)
{
        u32 mr = 0, temp;

        mr = get_mr(base, cs, LPDDR2_MR0);
        if (mr > 0xFF) {
                /* Mode register value bigger than 8 bit */
                return 0;
        }

        temp = (mr & LPDDR2_MR0_DI_MASK) >> LPDDR2_MR0_DI_SHIFT;
        if (temp) {
                /* Not SDRAM */
                return 0;
        }

        temp = (mr & LPDDR2_MR0_DNVI_MASK) >> LPDDR2_MR0_DNVI_SHIFT;
        if (temp) {
                /* DNV supported - But DNV is only supported for NVM */
                return 0;
        }

        mr = get_mr(base, cs, LPDDR2_MR4);
        if (mr > 0xFF) {
                /* Mode register value bigger than 8 bit */
                return 0;
        }

        mr = get_mr(base, cs, LPDDR2_MR5);
        if (mr > 0xFF) {
                /* Mode register value bigger than 8 bit */
                return 0;
        }

        if (!get_lpddr2_manufacturer(mr)) {
                /* Manufacturer not identified */
                return 0;
        }
        lpddr2_device->manufacturer = mr;

        mr = get_mr(base, cs, LPDDR2_MR6);
        if (mr > 0xFF) {
                /* Mode register value bigger than 8 bit */
                return 0;
        }

        mr = get_mr(base, cs, LPDDR2_MR7);
        if (mr > 0xFF) {
                /* Mode register value bigger than 8 bit */
                return 0;
        }

        mr = get_mr(base, cs, LPDDR2_MR8);
        if (mr > 0xFF) {
                /* Mode register value bigger than 8 bit */
                return 0;
        }

        temp = (mr & MR8_TYPE_MASK) >> MR8_TYPE_SHIFT;
        if (!get_lpddr2_type(temp)) {
                /* Not SDRAM */
                return 0;
        }
        lpddr2_device->type = temp;

        temp = (mr & MR8_DENSITY_MASK) >> MR8_DENSITY_SHIFT;
        if (temp > LPDDR2_DENSITY_32Gb) {
                /* Density not supported */
                return 0;
        }
        lpddr2_device->density = temp;

        temp = (mr & MR8_IO_WIDTH_MASK) >> MR8_IO_WIDTH_SHIFT;
        if (!get_lpddr2_io_width(temp)) {
                /* IO width unsupported value */
                return 0;
        }
        lpddr2_device->io_width = temp;

        /*
         * If all the above tests pass we should
         * have a device on this chip-select
         */
        return 1;
}

struct lpddr2_device_details *emif_get_device_details(u32 emif_nr, u8 cs,
                        struct lpddr2_device_details *lpddr2_dev_details)
{
        u32 phy;
        u32 base = (emif_nr == 1) ? EMIF1_BASE : EMIF2_BASE;

        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

        if (!lpddr2_dev_details)
                return NULL;

        /* Do the minimum init for mode register accesses */
        if (!(running_from_sdram() || warm_reset())) {
                phy = get_ddr_phy_ctrl_1(get_sys_clk_freq() / 2, RL_BOOT);
                writel(phy, &emif->emif_ddr_phy_ctrl_1);
        }

        if (!(is_lpddr2_sdram_present(base, cs, lpddr2_dev_details)))
                return NULL;

        display_sdram_details(emif_num(base), cs, lpddr2_dev_details);

        return lpddr2_dev_details;
}
#endif /* CONFIG_SYS_AUTOMATIC_SDRAM_DETECTION */

static void do_sdram_init(u32 base)
{
        const struct emif_regs *regs;
        u32 in_sdram, emif_nr;

        debug(">>do_sdram_init() %x\n", base);

        in_sdram = running_from_sdram();
        emif_nr = (base == EMIF1_BASE) ? 1 : 2;

#ifdef CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS
        emif_get_reg_dump(emif_nr, &regs);
        if (!regs) {
                debug("EMIF: reg dump not provided\n");
                return;
        }
#else
        /*
         * The user has not provided the register values. We need to
         * calculate them based on the timings and the DDR frequency
         */
        struct emif_device_details dev_details;
        struct emif_regs calculated_regs;

        /*
         * Get device details:
         * - Discovered if CONFIG_SYS_AUTOMATIC_SDRAM_DETECTION is set
         * - Obtained from user otherwise
         */
        struct lpddr2_device_details cs0_dev_details, cs1_dev_details;
        emif_reset_phy(base);
        dev_details.cs0_device_details = emif_get_device_details(emif_nr, CS0,
                                                &cs0_dev_details);
        dev_details.cs1_device_details = emif_get_device_details(emif_nr, CS1,
                                                &cs1_dev_details);
        emif_reset_phy(base);

        /* Return if no devices on this EMIF */
        if (!dev_details.cs0_device_details &&
            !dev_details.cs1_device_details) {
                return;
        }

        /*
         * Get device timings:
         * - Default timings specified by JESD209-2 if
         *   CONFIG_SYS_DEFAULT_LPDDR2_TIMINGS is set
         * - Obtained from user otherwise
         */
        emif_get_device_timings(emif_nr, &dev_details.cs0_device_timings,
                                &dev_details.cs1_device_timings);

        /* Calculate the register values */
        emif_calculate_regs(&dev_details, omap_ddr_clk(), &calculated_regs);
        regs = &calculated_regs;
#endif /* CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS */

        /*
         * Initializing the DDR device cannot happen from SDRAM.
         * Changing the timing registers in EMIF can happen (going from one
         * OPP to another)
         */
        if (!in_sdram && (!warm_reset() || is_dra7xx())) {
                if (emif_sdram_type(regs->sdram_config) ==
                    EMIF_SDRAM_TYPE_LPDDR2)
                        lpddr2_init(base, regs);
#ifndef CONFIG_OMAP44XX
                else
                        ddr3_init(base, regs);
#endif
        }
#ifdef CONFIG_OMAP54XX
        if (warm_reset() && (emif_sdram_type(regs->sdram_config) ==
            EMIF_SDRAM_TYPE_DDR3) && !is_dra7xx()) {
                set_lpmode_selfrefresh(base);
                emif_reset_phy(base);
                omap5_ddr3_leveling(base, regs);
        }
#endif

        /* Write to the shadow registers */
        emif_update_timings(base, regs);

        debug("<<do_sdram_init() %x\n", base);
}

void emif_post_init_config(u32 base)
{
        struct emif_reg_struct *emif = (struct emif_reg_struct *)base;
        u32 omap_rev = omap_revision();

        /* reset phy on ES2.0 */
        if (omap_rev == OMAP4430_ES2_0)
                emif_reset_phy(base);

        /* Put EMIF back in smart idle on ES1.0 */
        if (omap_rev == OMAP4430_ES1_0)
                writel(0x80000000, &emif->emif_pwr_mgmt_ctrl);
}

void dmm_init(u32 base)
{
        const struct dmm_lisa_map_regs *lisa_map_regs;
        u32 i, section, valid;

#ifdef CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS
        emif_get_dmm_regs(&lisa_map_regs);
#else
        u32 emif1_size, emif2_size, mapped_size, section_map = 0;
        u32 section_cnt, sys_addr;
        struct dmm_lisa_map_regs lis_map_regs_calculated = {0};

        mapped_size = 0;
        section_cnt = 3;
        sys_addr = CONFIG_SYS_SDRAM_BASE;
        emif1_size = get_emif_mem_size(EMIF1_BASE);
        emif2_size = get_emif_mem_size(EMIF2_BASE);
        debug("emif1_size 0x%x emif2_size 0x%x\n", emif1_size, emif2_size);

        if (!emif1_size && !emif2_size)
                return;

        /* symmetric interleaved section */
        if (emif1_size && emif2_size) {
                mapped_size = min(emif1_size, emif2_size);
                section_map = DMM_LISA_MAP_INTERLEAVED_BASE_VAL;
                section_map |= 0 << EMIF_SDRC_ADDR_SHIFT;
                /* only MSB */
                section_map |= (sys_addr >> 24) <<
                                EMIF_SYS_ADDR_SHIFT;
                section_map |= get_dmm_section_size_map(mapped_size * 2)
                                << EMIF_SYS_SIZE_SHIFT;
                lis_map_regs_calculated.dmm_lisa_map_3 = section_map;
                emif1_size -= mapped_size;
                emif2_size -= mapped_size;
                sys_addr += (mapped_size * 2);
                section_cnt--;
        }

        /*
         * Single-EMIF section (we can have at most one single-EMIF
         * section: either EMIF1 or EMIF2 or none, but not both)
         */
        if (emif1_size) {
                section_map = DMM_LISA_MAP_EMIF1_ONLY_BASE_VAL;
                section_map |= get_dmm_section_size_map(emif1_size)
                                << EMIF_SYS_SIZE_SHIFT;
                /* only MSB */
                section_map |= (mapped_size >> 24) <<
                                EMIF_SDRC_ADDR_SHIFT;
                /* only MSB */
                section_map |= (sys_addr >> 24) << EMIF_SYS_ADDR_SHIFT;
                section_cnt--;
        }
        if (emif2_size) {
                section_map = DMM_LISA_MAP_EMIF2_ONLY_BASE_VAL;
                section_map |= get_dmm_section_size_map(emif2_size) <<
                                EMIF_SYS_SIZE_SHIFT;
                /* only MSB */
                section_map |= (mapped_size >> 24) << EMIF_SDRC_ADDR_SHIFT;
                /* only MSB */
                section_map |= (sys_addr >> 24) << EMIF_SYS_ADDR_SHIFT;
                section_cnt--;
        }

        if (section_cnt == 2) {
                /* Only 1 section - either symmetric or single EMIF */
                lis_map_regs_calculated.dmm_lisa_map_3 = section_map;
                lis_map_regs_calculated.dmm_lisa_map_2 = 0;
                lis_map_regs_calculated.dmm_lisa_map_1 = 0;
        } else {
                /* 2 sections - 1 symmetric, 1 single EMIF */
                lis_map_regs_calculated.dmm_lisa_map_2 = section_map;
                lis_map_regs_calculated.dmm_lisa_map_1 = 0;
        }

        /* TRAP for invalid TILER mappings in section 0 */
        lis_map_regs_calculated.dmm_lisa_map_0 = DMM_LISA_MAP_0_INVAL_ADDR_TRAP;

        if (omap_revision() >= OMAP4460_ES1_0)
                lis_map_regs_calculated.is_ma_present = 1;

        lisa_map_regs = &lis_map_regs_calculated;
#endif
        struct dmm_lisa_map_regs *hw_lisa_map_regs =
            (struct dmm_lisa_map_regs *)base;

        writel(0, &hw_lisa_map_regs->dmm_lisa_map_3);
        writel(0, &hw_lisa_map_regs->dmm_lisa_map_2);
        writel(0, &hw_lisa_map_regs->dmm_lisa_map_1);
        writel(0, &hw_lisa_map_regs->dmm_lisa_map_0);

        writel(lisa_map_regs->dmm_lisa_map_3,
                &hw_lisa_map_regs->dmm_lisa_map_3);
        writel(lisa_map_regs->dmm_lisa_map_2,
                &hw_lisa_map_regs->dmm_lisa_map_2);
        writel(lisa_map_regs->dmm_lisa_map_1,
                &hw_lisa_map_regs->dmm_lisa_map_1);
        writel(lisa_map_regs->dmm_lisa_map_0,
                &hw_lisa_map_regs->dmm_lisa_map_0);

        if (lisa_map_regs->is_ma_present) {
                hw_lisa_map_regs =
                    (struct dmm_lisa_map_regs *)MA_BASE;

                writel(lisa_map_regs->dmm_lisa_map_3,
                        &hw_lisa_map_regs->dmm_lisa_map_3);
                writel(lisa_map_regs->dmm_lisa_map_2,
                        &hw_lisa_map_regs->dmm_lisa_map_2);
                writel(lisa_map_regs->dmm_lisa_map_1,
                        &hw_lisa_map_regs->dmm_lisa_map_1);
                writel(lisa_map_regs->dmm_lisa_map_0,
                        &hw_lisa_map_regs->dmm_lisa_map_0);

                setbits_le32(MA_PRIORITY, MA_HIMEM_INTERLEAVE_UN_MASK);
        }

        /*
         * EMIF should be configured only when memory is mapped on it;
         * track that with the emif1_enabled and emif2_enabled flags.
         */
        emif1_enabled = 0;
        emif2_enabled = 0;
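        /*
         * The SDRC_MAP field of each LISA section identifies which EMIF(s)
         * the section is mapped on: 1 = EMIF1 only, 2 = EMIF2 only,
         * 3 = interleaved across both EMIFs.
         */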
        for (i = 0; i < 4; i++) {
                section = __raw_readl(DMM_BASE + i * 4);
                valid = (section & EMIF_SDRC_MAP_MASK) >>
                        (EMIF_SDRC_MAP_SHIFT);
                if (valid == 3) {
                        emif1_enabled = 1;
                        emif2_enabled = 1;
                        break;
                }

                if (valid == 1)
                        emif1_enabled = 1;

                if (valid == 2)
                        emif2_enabled = 1;
        }
}

static void do_bug0039_workaround(u32 base)
{
        u32 val, i, clkctrl;
        struct emif_reg_struct *emif_base = (struct emif_reg_struct *)base;
        const struct read_write_regs *bug_00339_regs;
        u32 iterations;
        u32 *phy_status_base = &emif_base->emif_ddr_phy_status[0];
        u32 *phy_ctrl_base = &emif_base->emif_ddr_ext_phy_ctrl_1;

        if (is_dra7xx())
                phy_status_base++;

        bug_00339_regs = get_bug_regs(&iterations);

        /* Put EMIF into idle */
        clkctrl = __raw_readl((*prcm)->cm_memif_clkstctrl);
        __raw_writel(0x0, (*prcm)->cm_memif_clkstctrl);

        /* Copy the PHY status registers into the PHY ctrl shadow registers */
        for (i = 0; i < iterations; i++) {
                val = __raw_readl(phy_status_base +
                                  bug_00339_regs[i].read_reg - 1);

                __raw_writel(val, phy_ctrl_base +
                             ((bug_00339_regs[i].write_reg - 1) << 1));

                __raw_writel(val, phy_ctrl_base +
                             (bug_00339_regs[i].write_reg << 1) - 1);
        }

        /* Disable leveling */
        writel(0x0, &emif_base->emif_rd_wr_lvl_rmp_ctl);

        __raw_writel(clkctrl, (*prcm)->cm_memif_clkstctrl);
}

/*
 * SDRAM initialization:
 * SDRAM initialization has two parts:
 * 1. Configuring the SDRAM device
 * 2. Update the AC timings related parameters in the EMIF module
 * (1) should be done only once and should not be done while we are
 * running from SDRAM.
 * (2) can and should be done more than once if OPP changes.
 * Particularly, this may be needed when we boot without SPL and
 * use the Configuration Header (CH). ROM code supports only 50% OPP
 * at boot (low power boot). So U-Boot has to switch to OPP100 and update
 * the frequency. So,
 * Doing (1) and (2) makes sense - first time initialization
 * Doing (2) and not (1) makes sense - OPP change (when using CH)
 * Doing (1) and not (2) doesn't make sense
 * See do_sdram_init() for the details
 */
void sdram_init(void)
{
        u32 in_sdram, size_prog, size_detect;
        struct emif_reg_struct *emif = (struct emif_reg_struct *)EMIF1_BASE;
        u32 sdram_type = emif_sdram_type(emif->emif_sdram_config);

        debug(">>sdram_init()\n");

        if (omap_hw_init_context() == OMAP_INIT_CONTEXT_UBOOT_AFTER_SPL)
                return;

        in_sdram = running_from_sdram();
        debug("in_sdram = %d\n", in_sdram);

        if (!in_sdram) {
                if ((sdram_type == EMIF_SDRAM_TYPE_LPDDR2) && !warm_reset())
                        bypass_dpll((*prcm)->cm_clkmode_dpll_core);
                else if (sdram_type == EMIF_SDRAM_TYPE_DDR3)
                        writel(CM_DLL_CTRL_NO_OVERRIDE, (*prcm)->cm_dll_ctrl);
        }

        if (!in_sdram)
                dmm_init(DMM_BASE);

        if (emif1_enabled)
                do_sdram_init(EMIF1_BASE);

        if (emif2_enabled)
                do_sdram_init(EMIF2_BASE);

        if (!(in_sdram || warm_reset())) {
                if (emif1_enabled)
                        emif_post_init_config(EMIF1_BASE);
                if (emif2_enabled)
                        emif_post_init_config(EMIF2_BASE);
        }

        /* for the shadow registers to take effect */
        if (sdram_type == EMIF_SDRAM_TYPE_LPDDR2)
                freq_update_core();

        /* Do some testing after the init */
        if (!in_sdram) {
                size_prog = omap_sdram_size();
                size_prog = log_2_n_round_down(size_prog);
                size_prog = (1 << size_prog);

                size_detect = get_ram_size((long *)CONFIG_SYS_SDRAM_BASE,
                                                size_prog);
                /* Compare with the size programmed */
                if (size_detect != size_prog) {
                        printf("SDRAM: identified size not same as expected"
                                " size; identified: %x expected: %x\n",
                                size_detect,
                                size_prog);
                } else {
                        debug("get_ram_size() successful\n");
                }
        }

        if (sdram_type == EMIF_SDRAM_TYPE_DDR3 &&
            (!in_sdram && !warm_reset()) && (!is_dra7xx())) {
                if (emif1_enabled)
                        do_bug0039_workaround(EMIF1_BASE);
                if (emif2_enabled)
                        do_bug0039_workaround(EMIF2_BASE);
        }

        debug("<<sdram_init()\n");
}