uboot/arch/powerpc/cpu/mpc85xx/cpu_init.c
<<
>>
Prefs
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Copyright 2007-2011 Freescale Semiconductor, Inc.
   4 *
   5 * (C) Copyright 2003 Motorola Inc.
   6 * Modified by Xianghua Xiao, X.Xiao@motorola.com
   7 *
   8 * (C) Copyright 2000
   9 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
  10 */
  11
  12#include <common.h>
  13#include <display_options.h>
  14#include <env.h>
  15#include <init.h>
  16#include <net.h>
  17#include <watchdog.h>
  18#include <asm/processor.h>
  19#include <ioports.h>
  20#include <sata.h>
  21#include <fm_eth.h>
  22#include <asm/io.h>
  23#include <asm/cache.h>
  24#include <asm/mmu.h>
  25#include <fsl_errata.h>
  26#include <asm/fsl_law.h>
  27#include <asm/fsl_serdes.h>
  28#include <asm/fsl_srio.h>
  29#ifdef CONFIG_FSL_CORENET
  30#include <asm/fsl_portals.h>
  31#include <asm/fsl_liodn.h>
  32#include <fsl_qbman.h>
  33#endif
  34#include <fsl_usb.h>
  35#include <hwconfig.h>
  36#include <linux/compiler.h>
  37#include <linux/delay.h>
  38#include "mp.h"
  39#ifdef CONFIG_CHAIN_OF_TRUST
  40#include <fsl_validate.h>
  41#endif
  42#ifdef CONFIG_FSL_CAAM
  43#include <fsl_sec.h>
  44#endif
  45#if defined(CONFIG_NXP_ESBC) && defined(CONFIG_FSL_CORENET)
  46#include <asm/fsl_pamu.h>
  47#include <fsl_secboot_err.h>
  48#endif
  49#ifdef CONFIG_SYS_QE_FMAN_FW_IN_NAND
  50#include <nand.h>
  51#include <errno.h>
  52#endif
  53#ifndef CONFIG_ARCH_QEMU_E500
  54#include <fsl_ddr.h>
  55#endif
  56#include "../../../../drivers/ata/fsl_sata.h"
  57#ifdef CONFIG_U_QE
  58#include <fsl_qe.h>
  59#endif
  60#include <dm.h>
  61
  62#ifdef CONFIG_SYS_FSL_SINGLE_SOURCE_CLK
  63/*
  64 * For deriving usb clock from 100MHz sysclk, reference divisor is set
  65 * to a value of 5, which gives an intermediate value 20(100/5). The
  66 * multiplication factor integer is set to 24, which when multiplied to
  67 * above intermediate value provides clock for usb ip.
  68 */
  69void usb_single_source_clk_configure(struct ccsr_usb_phy *usb_phy)
  70{
  71        sys_info_t sysinfo;
  72
  73        get_sys_info(&sysinfo);
  74        if (sysinfo.diff_sysclk == 1) {
  75                clrbits_be32(&usb_phy->pllprg[1],
  76                             CFG_SYS_FSL_USB_PLLPRG2_MFI);
  77                setbits_be32(&usb_phy->pllprg[1],
  78                             CFG_SYS_FSL_USB_PLLPRG2_REF_DIV_INTERNAL_CLK |
  79                             CFG_SYS_FSL_USB_PLLPRG2_MFI_INTERNAL_CLK |
  80                             CFG_SYS_FSL_USB_INTERNAL_SOC_CLK_EN);
  81                }
  82}
  83#endif
  84
  85#ifdef CONFIG_SYS_FSL_ERRATUM_A006261
/*
 * Erratum A-006261 workaround: adjust the USB PHY high-speed disconnect
 * threshold (dual-PHY parts) or the squelch calibration values
 * (single-PHY parts) so disconnect detection works reliably.
 */
void fsl_erratum_a006261_workaround(struct ccsr_usb_phy __iomem *usb_phy)
{
#ifdef CONFIG_SYS_FSL_USB_DUAL_PHY_ENABLE
	u32 xcvrprg = in_be32(&usb_phy->port1.xcvrprg);

	/*
	 * Increase Disconnect Threshold by 50mV.
	 *
	 * NOTE(review): by C operator precedence this evaluates as
	 * xcvrprg &= (~MASK | INC_DCNT_THRESHOLD_50MV), i.e. a single
	 * AND with a combined mask, NOT (xcvrprg & ~MASK) | INC.
	 * Whether that is intended depends on the bit layout of the
	 * two constants -- confirm against the A-006261 erratum sheet
	 * before changing it; the same pattern repeats for port2 below.
	 */
	xcvrprg &= ~CFG_SYS_FSL_USB_XCVRPRG_HS_DCNT_PROG_MASK |
						INC_DCNT_THRESHOLD_50MV;
	/* Enable programming of USB High speed Disconnect threshold */
	xcvrprg |= CFG_SYS_FSL_USB_XCVRPRG_HS_DCNT_PROG_EN;
	out_be32(&usb_phy->port1.xcvrprg, xcvrprg);

	xcvrprg = in_be32(&usb_phy->port2.xcvrprg);
	/* Increase Disconnect Threshold by 50mV (same note as port1) */
	xcvrprg &= ~CFG_SYS_FSL_USB_XCVRPRG_HS_DCNT_PROG_MASK |
						INC_DCNT_THRESHOLD_50MV;
	/* Enable programming of USB High speed Disconnect threshold */
	xcvrprg |= CFG_SYS_FSL_USB_XCVRPRG_HS_DCNT_PROG_EN;
	out_be32(&usb_phy->port2.xcvrprg, xcvrprg);
#else

	u32 temp = 0;
	u32 status = in_be32(&usb_phy->status1);

	/* Read back the two 3-bit squelch calibration fields */
	u32 squelch_prog_rd_0_2 =
		(status >> CFG_SYS_FSL_USB_SQUELCH_PROG_RD_0)
			& CFG_SYS_FSL_USB_SQUELCH_PROG_MASK;

	u32 squelch_prog_rd_3_5 =
		(status >> CFG_SYS_FSL_USB_SQUELCH_PROG_RD_3)
			& CFG_SYS_FSL_USB_SQUELCH_PROG_MASK;

	/* Raise the HS disconnect threshold and select auto-cal write mode */
	setbits_be32(&usb_phy->config1,
		     CFG_SYS_FSL_USB_HS_DISCNCT_INC);
	setbits_be32(&usb_phy->config2,
		     CFG_SYS_FSL_USB_RX_AUTO_CAL_RD_WR_SEL);

	/* Write each read-out field back at the swapped write position */
	temp = squelch_prog_rd_0_2 << CFG_SYS_FSL_USB_SQUELCH_PROG_WR_3;
	out_be32(&usb_phy->config2, in_be32(&usb_phy->config2) | temp);

	temp = squelch_prog_rd_3_5 << CFG_SYS_FSL_USB_SQUELCH_PROG_WR_0;
	out_be32(&usb_phy->config2, in_be32(&usb_phy->config2) | temp);
#endif
}
 130#endif
 131
 132
 133#if defined(CONFIG_QE) && !defined(CONFIG_U_QE)
 134extern qe_iop_conf_t qe_iop_conf_tab[];
 135extern void qe_config_iopin(u8 port, u8 pin, int dir,
 136                                int open_drain, int assign);
 137extern void qe_init(uint qe_base);
 138extern void qe_reset(void);
 139
 140static void config_qe_ioports(void)
 141{
 142        u8      port, pin;
 143        int     dir, open_drain, assign;
 144        int     i;
 145
 146        for (i = 0; qe_iop_conf_tab[i].assign != QE_IOP_TAB_END; i++) {
 147                port            = qe_iop_conf_tab[i].port;
 148                pin             = qe_iop_conf_tab[i].pin;
 149                dir             = qe_iop_conf_tab[i].dir;
 150                open_drain      = qe_iop_conf_tab[i].open_drain;
 151                assign          = qe_iop_conf_tab[i].assign;
 152                qe_config_iopin(port, pin, dir, open_drain, assign);
 153        }
 154}
 155#endif
 156
 157#ifdef CONFIG_SYS_FSL_CPC
 158#if defined(CONFIG_RAMBOOT_PBL) || defined(CONFIG_SYS_CPC_REINIT_F)
 159void disable_cpc_sram(void)
 160{
 161        int i;
 162
 163        cpc_corenet_t *cpc = (cpc_corenet_t *)CFG_SYS_FSL_CPC_ADDR;
 164
 165        for (i = 0; i < CFG_SYS_NUM_CPC; i++, cpc++) {
 166                if (in_be32(&cpc->cpcsrcr0) & CPC_SRCR0_SRAMEN) {
 167                        /* find and disable LAW of SRAM */
 168                        struct law_entry law = find_law(CFG_SYS_INIT_L3_ADDR);
 169
 170                        if (law.index == -1) {
 171                                printf("\nFatal error happened\n");
 172                                return;
 173                        }
 174                        disable_law(law.index);
 175
 176                        clrbits_be32(&cpc->cpchdbcr0, CPC_HDBCR0_CDQ_SPEC_DIS);
 177                        out_be32(&cpc->cpccsr0, 0);
 178                        out_be32(&cpc->cpcsrcr0, 0);
 179                }
 180        }
 181}
 182#endif
 183
 184#if defined(T1040_TDM_QUIRK_CCSR_BASE)
 185#ifdef CONFIG_POST
 186#error POST memory test cannot be enabled with TDM
 187#endif
 188static void enable_tdm_law(void)
 189{
 190        int ret;
 191        char buffer[HWCONFIG_BUFFER_SIZE] = {0};
 192        int tdm_hwconfig_enabled = 0;
 193
 194        /*
 195         * Extract hwconfig from environment since environment
 196         * is not setup properly yet. Search for tdm entry in
 197         * hwconfig.
 198         */
 199        ret = env_get_f("hwconfig", buffer, sizeof(buffer));
 200        if (ret > 0) {
 201                tdm_hwconfig_enabled = hwconfig_f("tdm", buffer);
 202                /* If tdm is defined in hwconfig, set law for tdm workaround */
 203                if (tdm_hwconfig_enabled)
 204                        set_next_law(T1040_TDM_QUIRK_CCSR_BASE, LAW_SIZE_16M,
 205                                     LAW_TRGT_IF_CCSR);
 206        }
 207}
 208#endif
 209
 210void enable_cpc(void)
 211{
 212        int i;
 213        int ret;
 214        u32 size = 0;
 215        u32 cpccfg0;
 216        char buffer[HWCONFIG_BUFFER_SIZE];
 217        char cpc_subarg[16];
 218        bool have_hwconfig = false;
 219        int cpc_args = 0;
 220        cpc_corenet_t *cpc = (cpc_corenet_t *)CFG_SYS_FSL_CPC_ADDR;
 221
 222        /* Extract hwconfig from environment */
 223        ret = env_get_f("hwconfig", buffer, sizeof(buffer));
 224        if (ret > 0) {
 225                /*
 226                 * If "en_cpc" is not defined in hwconfig then by default all
 227                 * cpcs are enable. If this config is defined then individual
 228                 * cpcs which have to be enabled should also be defined.
 229                 * e.g en_cpc:cpc1,cpc2;
 230                 */
 231                if (hwconfig_f("en_cpc", buffer))
 232                        have_hwconfig = true;
 233        }
 234
 235        for (i = 0; i < CFG_SYS_NUM_CPC; i++, cpc++) {
 236                if (have_hwconfig) {
 237                        sprintf(cpc_subarg, "cpc%u", i + 1);
 238                        cpc_args = hwconfig_sub_f("en_cpc", cpc_subarg, buffer);
 239                        if (cpc_args == 0)
 240                                continue;
 241                }
 242                cpccfg0 = in_be32(&cpc->cpccfg0);
 243                size += CPC_CFG0_SZ_K(cpccfg0);
 244
 245#ifdef CONFIG_SYS_FSL_ERRATUM_CPC_A002
 246                setbits_be32(&cpc->cpchdbcr0, CPC_HDBCR0_TAG_ECC_SCRUB_DIS);
 247#endif
 248#ifdef CONFIG_SYS_FSL_ERRATUM_CPC_A003
 249                setbits_be32(&cpc->cpchdbcr0, CPC_HDBCR0_DATA_ECC_SCRUB_DIS);
 250#endif
 251#ifdef CONFIG_SYS_FSL_ERRATUM_A006593
 252                setbits_be32(&cpc->cpchdbcr0, 1 << (31 - 21));
 253#endif
 254#ifdef CONFIG_SYS_FSL_ERRATUM_A006379
 255                if (has_erratum_a006379()) {
 256                        setbits_be32(&cpc->cpchdbcr0,
 257                                     CPC_HDBCR0_SPLRU_LEVEL_EN);
 258                }
 259#endif
 260
 261                out_be32(&cpc->cpccsr0, CPC_CSR0_CE | CPC_CSR0_PE);
 262                /* Read back to sync write */
 263                in_be32(&cpc->cpccsr0);
 264
 265        }
 266
 267        puts("Corenet Platform Cache: ");
 268        print_size(size * 1024, " enabled\n");
 269}
 270
 271static void invalidate_cpc(void)
 272{
 273        int i;
 274        cpc_corenet_t *cpc = (cpc_corenet_t *)CFG_SYS_FSL_CPC_ADDR;
 275
 276        for (i = 0; i < CFG_SYS_NUM_CPC; i++, cpc++) {
 277                /* skip CPC when it used as all SRAM */
 278                if (in_be32(&cpc->cpcsrcr0) & CPC_SRCR0_SRAMEN)
 279                        continue;
 280                /* Flash invalidate the CPC and clear all the locks */
 281                out_be32(&cpc->cpccsr0, CPC_CSR0_FI | CPC_CSR0_LFC);
 282                while (in_be32(&cpc->cpccsr0) & (CPC_CSR0_FI | CPC_CSR0_LFC))
 283                        ;
 284        }
 285}
 286#else
 287#define enable_cpc()
 288#define invalidate_cpc()
 289#define disable_cpc_sram()
 290#endif /* CONFIG_SYS_FSL_CPC */
 291
 292/*
 293 * Breathe some life into the CPU...
 294 *
 295 * Set up the memory map
 296 * initialize a bunch of registers
 297 */
 298
 299#ifdef CONFIG_FSL_CORENET
 300static void corenet_tb_init(void)
 301{
 302        volatile ccsr_rcpm_t *rcpm =
 303                (void *)(CFG_SYS_FSL_CORENET_RCPM_ADDR);
 304        volatile ccsr_pic_t *pic =
 305                (void *)(CFG_SYS_MPC8xxx_PIC_ADDR);
 306        u32 whoami = in_be32(&pic->whoami);
 307
 308        /* Enable the timebase register for this core */
 309        out_be32(&rcpm->ctbenrl, (1 << whoami));
 310}
 311#endif
 312
 313#ifdef CONFIG_SYS_FSL_ERRATUM_A007212
/*
 * Erratum A-007212 workaround: when the RCW reports a DDR PLL ratio of
 * 0 (PLL disabled), recover the intended ratio from the reserved RCW
 * bits and program the DDR PLL through the DCSR debug registers so
 * DDR can come up at all.
 */
void fsl_erratum_a007212_workaround(void)
{
	ccsr_gur_t __iomem *gur = (void *)(CFG_SYS_MPC85xx_GUTS_ADDR);
	u32 ddr_pll_ratio;
	/* DCSR-space DDR PLL debug registers, one set per DDR controller */
	u32 __iomem *plldgdcr1 = (void *)(CFG_SYS_DCSRBAR + 0x21c20);
	u32 __iomem *plldadcr1 = (void *)(CFG_SYS_DCSRBAR + 0x21c28);
	u32 __iomem *dpdovrcr4 = (void *)(CFG_SYS_DCSRBAR + 0x21e80);
#if (CONFIG_SYS_NUM_DDR_CTLRS >= 2)
	u32 __iomem *plldgdcr2 = (void *)(CFG_SYS_DCSRBAR + 0x21c40);
	u32 __iomem *plldadcr2 = (void *)(CFG_SYS_DCSRBAR + 0x21c48);
#if (CONFIG_SYS_NUM_DDR_CTLRS >= 3)
	u32 __iomem *plldgdcr3 = (void *)(CFG_SYS_DCSRBAR + 0x21c60);
	u32 __iomem *plldadcr3 = (void *)(CFG_SYS_DCSRBAR + 0x21c68);
#endif
#endif
	/*
	 * Even this workaround applies to selected version of SoCs, it is
	 * safe to apply to all versions, with the limitation of odd ratios.
	 * If RCW has disabled DDR PLL, we have to apply this workaround,
	 * otherwise DDR will not work.
	 */
	ddr_pll_ratio = (in_be32(&gur->rcwsr[0]) >>
		FSL_CORENET_RCWSR0_MEM_PLL_RAT_SHIFT) &
		FSL_CORENET_RCWSR0_MEM_PLL_RAT_MASK;
	/* check if RCW sets ratio to 0, required by this workaround */
	if (ddr_pll_ratio != 0)
		return;
	/* the real ratio is then carried in the reserved RCW field */
	ddr_pll_ratio = (in_be32(&gur->rcwsr[0]) >>
		FSL_CORENET_RCWSR0_MEM_PLL_RAT_RESV_SHIFT) &
		FSL_CORENET_RCWSR0_MEM_PLL_RAT_MASK;
	/* check if reserved bits have the desired ratio */
	if (ddr_pll_ratio == 0) {
		printf("Error: Unknown DDR PLL ratio!\n");
		return;
	}
	ddr_pll_ratio >>= 1;

	/*
	 * NOTE(review): the magic constants below are the erratum's
	 * documented register sequence (assert override, program ratio,
	 * settle, release) -- confirm against the A-007212 document
	 * before changing any of them.
	 */
	setbits_be32(plldadcr1, 0x02000001);
#if (CONFIG_SYS_NUM_DDR_CTLRS >= 2)
	setbits_be32(plldadcr2, 0x02000001);
#if (CONFIG_SYS_NUM_DDR_CTLRS >= 3)
	setbits_be32(plldadcr3, 0x02000001);
#endif
#endif
	setbits_be32(dpdovrcr4, 0xe0000000);
	out_be32(plldgdcr1, 0x08000001 | (ddr_pll_ratio << 1));
#if (CONFIG_SYS_NUM_DDR_CTLRS >= 2)
	out_be32(plldgdcr2, 0x08000001 | (ddr_pll_ratio << 1));
#if (CONFIG_SYS_NUM_DDR_CTLRS >= 3)
	out_be32(plldgdcr3, 0x08000001 | (ddr_pll_ratio << 1));
#endif
#endif
	/* allow the PLL to settle before releasing the override */
	udelay(100);
	clrbits_be32(plldadcr1, 0x02000001);
#if (CONFIG_SYS_NUM_DDR_CTLRS >= 2)
	clrbits_be32(plldadcr2, 0x02000001);
#if (CONFIG_SYS_NUM_DDR_CTLRS >= 3)
	clrbits_be32(plldadcr3, 0x02000001);
#endif
#endif
	clrbits_be32(dpdovrcr4, 0xe0000000);
}
 376#endif
 377
/*
 * Early (pre-relocation) CPU setup: apply silicon errata workarounds,
 * tear down boot-time TLBs/LAWs, program the memory controller and
 * QE/DMA/timebase blocks, and invalidate the CPC before DDR is enabled.
 * Always returns 0.
 */
ulong cpu_init_f(void)
{
	extern void m8560_cpm_reset (void);
#ifdef CFG_SYS_DCSRBAR_PHYS
	ccsr_gur_t *gur = (void *)(CFG_SYS_MPC85xx_GUTS_ADDR);
#endif
#if defined(CONFIG_NXP_ESBC) && !defined(CONFIG_SYS_RAMBOOT)
	struct law_entry law;
#endif
#ifdef CONFIG_ARCH_MPC8548
	ccsr_local_ecm_t *ecm = (void *)(CFG_SYS_MPC85xx_ECM_ADDR);
	uint svr = get_svr();

	/*
	 * CPU2 errata workaround: A core hang possible while executing
	 * a msync instruction and a snoopable transaction from an I/O
	 * master tagged to make quick forward progress is present.
	 * Fixed in silicon rev 2.1.
	 */
	if ((SVR_MAJ(svr) == 1) || ((SVR_MAJ(svr) == 2 && SVR_MIN(svr) == 0x0)))
		out_be32(&ecm->eebpcr, in_be32(&ecm->eebpcr) | (1 << 16));
#endif

	/* Drop the two temporary boot-time TLB entries */
	disable_tlb(14);
	disable_tlb(15);

#if defined(CONFIG_NXP_ESBC) && !defined(CONFIG_SYS_RAMBOOT)
	/* Disable the LAW created for NOR flash by the PBI commands */
	law = find_law(CFG_SYS_PBI_FLASH_BASE);
	if (law.index != -1)
		disable_law(law.index);

#if defined(CONFIG_SYS_CPC_REINIT_F)
	disable_cpc_sram();
#endif
#endif

	init_early_memctl_regs();

#if defined(CONFIG_QE) && !defined(CONFIG_U_QE)
	/* Config QE ioports */
	config_qe_ioports();
#endif

#if defined(CONFIG_FSL_DMA)
	dma_init();
#endif
#ifdef CONFIG_FSL_CORENET
	corenet_tb_init();
#endif
	init_used_tlb_cams();

	/* Invalidate the CPC before DDR gets enabled */
	invalidate_cpc();

#ifdef CFG_SYS_DCSRBAR_PHYS
	/* set DCSRCR so that DCSR space is 1G */
	setbits_be32(&gur->dcsrcr, FSL_CORENET_DCSR_SZ_1G);
	/* read back to make sure the write has taken effect */
	in_be32(&gur->dcsrcr);
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_A007212
	fsl_erratum_a007212_workaround();
#endif

	return 0;
}
 445
/*
 * Implement a dummy function for those platforms w/o SERDES.
 * fsl_serdes_init is declared as a weak alias of this no-op, so
 * SoC-specific code that provides a real fsl_serdes_init overrides it;
 * platforms without SERDES silently fall through to this stub.
 */
static void __fsl_serdes__init(void)
{
	return;
}
__attribute__((weak, alias("__fsl_serdes__init"))) void fsl_serdes_init(void);
 452
 453#if defined(CONFIG_SYS_FSL_QORIQ_CHASSIS2) && defined(CONFIG_E6500)
/*
 * Walk the topology (tp_cluster) tables and enable the shared L2 cache
 * of every cluster after the first that contains at least one enabled
 * PPC core: invalidate it, set its stash ID, then enable it.
 * Always returns 0.
 */
int enable_cluster_l2(void)
{
	int i = 0;
	u32 cluster, svr = get_svr();
	ccsr_gur_t *gur = (void __iomem *)(CFG_SYS_MPC85xx_GUTS_ADDR);
	struct ccsr_cluster_l2 __iomem *l2cache;

	/* only the L2 of first cluster should be enabled as expected on T4080,
	 * but there is no EOC in the first cluster as HW sake, so return here
	 * to skip enabling L2 cache of the 2nd cluster.
	 */
	if (SVR_SOC_VER(svr) == SVR_T4080)
		return 0;

	cluster = in_be32(&gur->tp_cluster[i].lower);
	/* a single cluster (EOC set on the first entry) needs no extra work */
	if (cluster & TP_CLUSTER_EOC)
		return 0;

	/* The first cache has already been set up, so skip it */
	i++;

	/* Look through the remaining clusters, and set up their caches */
	do {
		int j, cluster_valid = 0;

		/* each cluster's L2 block is 0x40000 bytes after the previous */
		l2cache = (void __iomem *)(CFG_SYS_FSL_CLUSTER_1_L2 + i * 0x40000);

		cluster = in_be32(&gur->tp_cluster[i].lower);

		/* check that at least one core/accel is enabled in cluster */
		for (j = 0; j < 4; j++) {
			u32 idx = (cluster >> (j*8)) & TP_CLUSTER_INIT_MASK;
			u32 type = in_be32(&gur->tp_ityp[idx]);

			if ((type & TP_ITYP_AV) &&
			    TP_ITYP_TYPE(type) == TP_ITYP_TYPE_PPC)
				cluster_valid = 1;
		}

		if (cluster_valid) {
			/* set stash ID to (cluster) * 2 + 32 + 1 */
			clrsetbits_be32(&l2cache->l2csr1, 0xff, 32 + i * 2 + 1);

			printf("enable l2 for cluster %d %p\n", i, l2cache);

			/* flash-invalidate and lock-flash-clear, then wait */
			out_be32(&l2cache->l2csr0, L2CSR0_L2FI|L2CSR0_L2LFC);
			while ((in_be32(&l2cache->l2csr0)
				& (L2CSR0_L2FI|L2CSR0_L2LFC)) != 0)
					;
			/* enable the L2 with parity and replacement mode */
			out_be32(&l2cache->l2csr0, L2CSR0_L2E|L2CSR0_L2PE|L2CSR0_L2REP_MODE);
		}
		i++;
	} while (!(cluster & TP_CLUSTER_EOC));

	return 0;
}
 510#endif
 511
/*
 * Initialize L2 as cache.
 *
 * Three mutually exclusive configurations are supported:
 *  - CONFIG_L2_CACHE: classic e500v1/v2 memory-mapped L2 controller;
 *  - CONFIG_BACKSIDE_L2_CACHE: e500mc per-core backside L2 via SPRs;
 *  - CHASSIS2 + E6500: cluster-shared L2 (delegates to
 *    enable_cluster_l2() for clusters beyond the first).
 * Prints the detected size/state; returns 0 on success, -1 if the
 * classic controller reports an unknown size.
 */
int l2cache_init(void)
{
	__maybe_unused u32 svr = get_svr();
#ifdef CONFIG_L2_CACHE
	ccsr_l2cache_t *l2cache = (void __iomem *)CFG_SYS_MPC85xx_L2_ADDR;
#elif defined(CONFIG_SYS_FSL_QORIQ_CHASSIS2) && defined(CONFIG_E6500)
	struct ccsr_cluster_l2 * l2cache = (void __iomem *)CFG_SYS_FSL_CLUSTER_1_L2;
#endif

	puts ("L2:    ");

#if defined(CONFIG_L2_CACHE)
	volatile uint cache_ctl;
	uint ver;
	u32 l2siz_field;

	ver = SVR_SOC_VER(svr);

	asm("msync;isync");
	cache_ctl = l2cache->l2ctl;

#if defined(CONFIG_SYS_RAMBOOT) && defined(CFG_SYS_INIT_L2_ADDR)
	/* RAM-boot used the L2 as SRAM; convert it back to a cache */
	if (cache_ctl & MPC85xx_L2CTL_L2E) {
		/* Clear L2 SRAM memory-mapped base address */
		out_be32(&l2cache->l2srbar0, 0x0);
		out_be32(&l2cache->l2srbar1, 0x0);

		/* set MBECCDIS=0, SBECCDIS=0 */
		clrbits_be32(&l2cache->l2errdis,
				(MPC85xx_L2ERRDIS_MBECC |
				 MPC85xx_L2ERRDIS_SBECC));

		/* set L2E=0, L2SRAM=0 */
		clrbits_be32(&l2cache->l2ctl,
				(MPC85xx_L2CTL_L2E |
				 MPC85xx_L2CTL_L2SRAM_ENTIRE));
	}
#endif

	/* L2CTL[L2SIZ] (bits 28-29 of the read value) encodes the size */
	l2siz_field = (cache_ctl >> 28) & 0x3;

	switch (l2siz_field) {
	case 0x0:
		printf(" unknown size (0x%08x)\n", cache_ctl);
		return -1;
		break;
	case 0x1:
		/* older parts have half the cache of the newer ones for
		 * the same size encoding */
		if (ver == SVR_8540 || ver == SVR_8560   ||
		    ver == SVR_8541 || ver == SVR_8555) {
			puts("128 KiB ");
			/* set L2E=1, L2I=1, & L2BLKSZ=1 (128 KiBibyte) */
			cache_ctl = 0xc4000000;
		} else {
			puts("256 KiB ");
			cache_ctl = 0xc0000000; /* set L2E=1, L2I=1, & L2SRAM=0 */
		}
		break;
	case 0x2:
		if (ver == SVR_8540 || ver == SVR_8560   ||
		    ver == SVR_8541 || ver == SVR_8555) {
			puts("256 KiB ");
			/* set L2E=1, L2I=1, & L2BLKSZ=2 (256 KiBibyte) */
			cache_ctl = 0xc8000000;
		} else {
			puts("512 KiB ");
			/* set L2E=1, L2I=1, & L2SRAM=0 */
			cache_ctl = 0xc0000000;
		}
		break;
	case 0x3:
		puts("1024 KiB ");
		/* set L2E=1, L2I=1, & L2SRAM=0 */
		cache_ctl = 0xc0000000;
		break;
	}

	if (l2cache->l2ctl & MPC85xx_L2CTL_L2E) {
		puts("already enabled");
#if defined(CFG_SYS_INIT_L2_ADDR) && defined(CFG_SYS_FLASH_BASE)
		u32 l2srbar = l2cache->l2srbar0;
		/* if the SRAM window overlaps flash, move it to its init spot */
		if (l2cache->l2ctl & MPC85xx_L2CTL_L2SRAM_ENTIRE
				&& l2srbar >= CFG_SYS_FLASH_BASE) {
			l2srbar = CFG_SYS_INIT_L2_ADDR;
			l2cache->l2srbar0 = l2srbar;
			printf(", moving to 0x%08x", CFG_SYS_INIT_L2_ADDR);
		}
#endif /* CFG_SYS_INIT_L2_ADDR */
		puts("\n");
	} else {
		asm("msync;isync");
		l2cache->l2ctl = cache_ctl; /* invalidate & enable */
		asm("msync;isync");
		puts("enabled\n");
	}
#elif defined(CONFIG_BACKSIDE_L2_CACHE)
	/* P2040 has no backside L2 */
	if (SVR_SOC_VER(svr) == SVR_P2040) {
		puts("N/A\n");
		goto skip_l2;
	}

	u32 l2cfg0 = mfspr(SPRN_L2CFG0);

	/* invalidate the L2 cache */
	mtspr(SPRN_L2CSR0, (L2CSR0_L2FI|L2CSR0_L2LFC));
	while (mfspr(SPRN_L2CSR0) & (L2CSR0_L2FI|L2CSR0_L2LFC))
		;

#ifdef CONFIG_SYS_CACHE_STASHING
	/* set stash id to (coreID) * 2 + 32 + L2 (1) */
	mtspr(SPRN_L2CSR1, (32 + 1));
#endif

	/* enable the cache */
	mtspr(SPRN_L2CSR0, CFG_SYS_INIT_L2CSR0);

	if (CFG_SYS_INIT_L2CSR0 & L2CSR0_L2E) {
		while (!(mfspr(SPRN_L2CSR0) & L2CSR0_L2E))
			;
		/* L2CFG0 low bits give the size in 64 KiB units */
		print_size((l2cfg0 & 0x3fff) * 64 * 1024, " enabled\n");
	}

skip_l2:
#elif defined(CONFIG_SYS_FSL_QORIQ_CHASSIS2) && defined(CONFIG_E6500)
	if (l2cache->l2csr0 & L2CSR0_L2E)
		print_size((l2cache->l2cfg0 & 0x3fff) * 64 * 1024,
			   " enabled\n");

	/* first cluster's L2 is up; bring up the remaining clusters */
	enable_cluster_l2();
#else
	puts("disabled\n");
#endif

	return 0;
}
 649
 650/*
 651 *
 652 * The newer 8548, etc, parts have twice as much cache, but
 653 * use the same bit-encoding as the older 8555, etc, parts.
 654 *
 655 */
 656int cpu_init_r(void)
 657{
 658        __maybe_unused u32 svr = get_svr();
 659#ifdef CFG_SYS_LBC_LCRR
 660        fsl_lbc_t *lbc = (void __iomem *)LBC_BASE_ADDR;
 661#endif
 662#if defined(CONFIG_PPC_SPINTABLE_COMPATIBLE) && defined(CONFIG_MP)
 663        extern int spin_table_compat;
 664        const char *spin;
 665#endif
 666#ifdef CONFIG_SYS_FSL_ERRATUM_SEC_A003571
 667        ccsr_sec_t __iomem *sec = (void *)CFG_SYS_FSL_SEC_ADDR;
 668#endif
 669#if defined(CONFIG_SYS_P4080_ERRATUM_CPU22) || \
 670        defined(CONFIG_SYS_FSL_ERRATUM_NMG_CPU_A011)
 671        /*
 672         * CPU22 and NMG_CPU_A011 share the same workaround.
 673         * CPU22 applies to P4080 rev 1.0, 2.0, fixed in 3.0
 674         * NMG_CPU_A011 applies to P4080 rev 1.0, 2.0, fixed in 3.0
 675         * also applies to P3041 rev 1.0, 1.1, P2041 rev 1.0, 1.1, both
 676         * fixed in 2.0. NMG_CPU_A011 is activated by default and can
 677         * be disabled by hwconfig with syntax:
 678         *
 679         * fsl_cpu_a011:disable
 680         */
 681        extern int enable_cpu_a011_workaround;
 682#ifdef CONFIG_SYS_P4080_ERRATUM_CPU22
 683        enable_cpu_a011_workaround = (SVR_MAJ(svr) < 3);
 684#else
 685        char buffer[HWCONFIG_BUFFER_SIZE];
 686        char *buf = NULL;
 687        int n, res;
 688
 689        n = env_get_f("hwconfig", buffer, sizeof(buffer));
 690        if (n > 0)
 691                buf = buffer;
 692
 693        res = hwconfig_arg_cmp_f("fsl_cpu_a011", "disable", buf);
 694        if (res > 0) {
 695                enable_cpu_a011_workaround = 0;
 696        } else {
 697                if (n >= HWCONFIG_BUFFER_SIZE) {
 698                        printf("fsl_cpu_a011 was not found. hwconfig variable "
 699                                "may be too long\n");
 700                }
 701                enable_cpu_a011_workaround =
 702                        (SVR_SOC_VER(svr) == SVR_P4080 && SVR_MAJ(svr) < 3) ||
 703                        (SVR_SOC_VER(svr) != SVR_P4080 && SVR_MAJ(svr) < 2);
 704        }
 705#endif
 706        if (enable_cpu_a011_workaround) {
 707                flush_dcache();
 708                mtspr(L1CSR2, (mfspr(L1CSR2) | L1CSR2_DCWS));
 709                sync();
 710        }
 711#endif
 712
 713#ifdef CONFIG_SYS_FSL_ERRATUM_A007907
 714        flush_dcache();
 715        mtspr(L1CSR2, (mfspr(L1CSR2) & ~L1CSR2_DCSTASHID));
 716        sync();
 717#endif
 718
 719#ifdef CONFIG_SYS_FSL_ERRATUM_A005812
 720        /*
 721         * A-005812 workaround sets bit 32 of SPR 976 for SoCs running
 722         * in write shadow mode. Checking DCWS before setting SPR 976.
 723         */
 724        if (mfspr(L1CSR2) & L1CSR2_DCWS)
 725                mtspr(SPRN_HDBCR0, (mfspr(SPRN_HDBCR0) | 0x80000000));
 726#endif
 727
 728#if defined(CONFIG_PPC_SPINTABLE_COMPATIBLE) && defined(CONFIG_MP)
 729        spin = env_get("spin_table_compat");
 730        if (spin && (*spin == 'n'))
 731                spin_table_compat = 0;
 732        else
 733                spin_table_compat = 1;
 734#endif
 735
 736#ifdef CONFIG_FSL_CORENET
 737        set_liodns();
 738#ifdef CONFIG_SYS_DPAA_QBMAN
 739        setup_qbman_portals();
 740#endif
 741#endif
 742
 743        l2cache_init();
 744#if defined(CONFIG_RAMBOOT_PBL)
 745        disable_cpc_sram();
 746#endif
 747        enable_cpc();
 748#if defined(T1040_TDM_QUIRK_CCSR_BASE)
 749        enable_tdm_law();
 750#endif
 751
 752#ifndef CONFIG_SYS_FSL_NO_SERDES
 753        /* needs to be in ram since code uses global static vars */
 754        fsl_serdes_init();
 755#endif
 756
 757#ifdef CONFIG_SYS_FSL_ERRATUM_SEC_A003571
 758#define MCFGR_AXIPIPE 0x000000f0
 759        if (IS_SVR_REV(svr, 1, 0))
 760                sec_clrbits32(&sec->mcfgr, MCFGR_AXIPIPE);
 761#endif
 762
 763#ifdef CONFIG_SYS_FSL_ERRATUM_A005871
 764        if (IS_SVR_REV(svr, 1, 0)) {
 765                int i;
 766                __be32 *p = (void __iomem *)CFG_SYS_DCSRBAR + 0xb004c;
 767
 768                for (i = 0; i < 12; i++) {
 769                        p += i + (i > 5 ? 11 : 0);
 770                        out_be32(p, 0x2);
 771                }
 772                p = (void __iomem *)CFG_SYS_DCSRBAR + 0xb0108;
 773                out_be32(p, 0x34);
 774        }
 775#endif
 776
 777#ifdef CONFIG_SYS_SRIO
 778        srio_init();
 779#ifdef CONFIG_SRIO_PCIE_BOOT_MASTER
 780        char *s = env_get("bootmaster");
 781        if (s) {
 782                if (!strcmp(s, "SRIO1")) {
 783                        srio_boot_master(1);
 784                        srio_boot_master_release_slave(1);
 785                }
 786                if (!strcmp(s, "SRIO2")) {
 787                        srio_boot_master(2);
 788                        srio_boot_master_release_slave(2);
 789                }
 790        }
 791#endif
 792#endif
 793
 794#if defined(CONFIG_MP)
 795        setup_mp();
 796#endif
 797
 798#ifdef CONFIG_SYS_FSL_ERRATUM_ESDHC13
 799        {
 800                if (SVR_MAJ(svr) < 3) {
 801                        void *p;
 802                        p = (void *)CFG_SYS_DCSRBAR + 0x20520;
 803                        setbits_be32(p, 1 << (31 - 14));
 804                }
 805        }
 806#endif
 807
 808#ifdef CFG_SYS_LBC_LCRR
 809        /*
 810         * Modify the CLKDIV field of LCRR register to improve the writing
 811         * speed for NOR flash.
 812         */
 813        clrsetbits_be32(&lbc->lcrr, LCRR_CLKDIV, CFG_SYS_LBC_LCRR);
 814        __raw_readl(&lbc->lcrr);
 815        isync();
 816#ifdef CONFIG_SYS_FSL_ERRATUM_NMG_LBC103
 817        udelay(100);
 818#endif
 819#endif
 820
 821#ifdef CONFIG_SYS_FSL_USB1_PHY_ENABLE
 822        {
 823                struct ccsr_usb_phy __iomem *usb_phy1 =
 824                        (void *)CFG_SYS_MPC85xx_USB1_PHY_ADDR;
 825#ifdef CONFIG_SYS_FSL_ERRATUM_A006261
 826                if (has_erratum_a006261())
 827                        fsl_erratum_a006261_workaround(usb_phy1);
 828#endif
 829                out_be32(&usb_phy1->usb_enable_override,
 830                                CFG_SYS_FSL_USB_ENABLE_OVERRIDE);
 831        }
 832#endif
 833#ifdef CONFIG_SYS_FSL_USB2_PHY_ENABLE
 834        {
 835                struct ccsr_usb_phy __iomem *usb_phy2 =
 836                        (void *)CFG_SYS_MPC85xx_USB2_PHY_ADDR;
 837#ifdef CONFIG_SYS_FSL_ERRATUM_A006261
 838                if (has_erratum_a006261())
 839                        fsl_erratum_a006261_workaround(usb_phy2);
 840#endif
 841                out_be32(&usb_phy2->usb_enable_override,
 842                                CFG_SYS_FSL_USB_ENABLE_OVERRIDE);
 843        }
 844#endif
 845
 846#ifdef CONFIG_SYS_FSL_ERRATUM_USB14
 847        /* On P204x/P304x/P50x0 Rev1.0, USB transmit will result internal
 848         * multi-bit ECC errors which has impact on performance, so software
 849         * should disable all ECC reporting from USB1 and USB2.
 850         */
 851        if (IS_SVR_REV(get_svr(), 1, 0)) {
 852                struct dcsr_dcfg_regs *dcfg = (struct dcsr_dcfg_regs *)
 853                        (CFG_SYS_DCSRBAR + CFG_SYS_DCSR_DCFG_OFFSET);
 854                setbits_be32(&dcfg->ecccr1,
 855                                (DCSR_DCFG_ECC_DISABLE_USB1 |
 856                                 DCSR_DCFG_ECC_DISABLE_USB2));
 857        }
 858#endif
 859
 860#if defined(CONFIG_SYS_FSL_USB_DUAL_PHY_ENABLE)
 861                struct ccsr_usb_phy __iomem *usb_phy =
 862                        (void *)CFG_SYS_MPC85xx_USB1_PHY_ADDR;
 863                setbits_be32(&usb_phy->pllprg[1],
 864                             CFG_SYS_FSL_USB_PLLPRG2_PHY2_CLK_EN |
 865                             CFG_SYS_FSL_USB_PLLPRG2_PHY1_CLK_EN |
 866                             CFG_SYS_FSL_USB_PLLPRG2_MFI |
 867                             CFG_SYS_FSL_USB_PLLPRG2_PLL_EN);
 868#ifdef CONFIG_SYS_FSL_SINGLE_SOURCE_CLK
 869                usb_single_source_clk_configure(usb_phy);
 870#endif
 871                setbits_be32(&usb_phy->port1.ctrl,
 872                             CFG_SYS_FSL_USB_CTRL_PHY_EN);
 873                setbits_be32(&usb_phy->port1.drvvbuscfg,
 874                             CFG_SYS_FSL_USB_DRVVBUS_CR_EN);
 875                setbits_be32(&usb_phy->port1.pwrfltcfg,
 876                             CFG_SYS_FSL_USB_PWRFLT_CR_EN);
 877                setbits_be32(&usb_phy->port2.ctrl,
 878                             CFG_SYS_FSL_USB_CTRL_PHY_EN);
 879                setbits_be32(&usb_phy->port2.drvvbuscfg,
 880                             CFG_SYS_FSL_USB_DRVVBUS_CR_EN);
 881                setbits_be32(&usb_phy->port2.pwrfltcfg,
 882                             CFG_SYS_FSL_USB_PWRFLT_CR_EN);
 883
 884#ifdef CONFIG_SYS_FSL_ERRATUM_A006261
 885                if (has_erratum_a006261())
 886                        fsl_erratum_a006261_workaround(usb_phy);
 887#endif
 888
 889#endif /* CONFIG_SYS_FSL_USB_DUAL_PHY_ENABLE */
 890
 891#ifdef CONFIG_SYS_FSL_ERRATUM_A009942
 892        erratum_a009942_check_cpo();
 893#endif
 894
 895#ifdef CONFIG_FMAN_ENET
 896#ifndef CONFIG_DM_ETH
 897        fman_enet_init();
 898#endif
 899#endif
 900
 901#if defined(CONFIG_NXP_ESBC) && defined(CONFIG_FSL_CORENET)
 902        if (pamu_init() < 0)
 903                fsl_secboot_handle_error(ERROR_ESBC_PAMU_INIT);
 904#endif
 905
 906#ifdef CONFIG_FSL_CAAM
 907#if defined(CONFIG_ARCH_C29X)
 908        if ((SVR_SOC_VER(svr) == SVR_C292) ||
 909            (SVR_SOC_VER(svr) == SVR_C293))
 910                sec_init_idx(1);
 911
 912        if (SVR_SOC_VER(svr) == SVR_C293)
 913                sec_init_idx(2);
 914#endif
 915#endif
 916
 917#if defined(CONFIG_FSL_SATA_V2) && defined(CONFIG_SYS_FSL_ERRATUM_SATA_A001)
 918        /*
 919         * For P1022/1013 Rev1.0 silicon, after power on SATA host
 920         * controller is configured in legacy mode instead of the
 921         * expected enterprise mode. Software needs to clear bit[28]
 922         * of HControl register to change to enterprise mode from
 923         * legacy mode.  We assume that the controller is offline.
 924         */
 925        if (IS_SVR_REV(svr, 1, 0) &&
 926            ((SVR_SOC_VER(svr) == SVR_P1022) ||
 927             (SVR_SOC_VER(svr) == SVR_P1013))) {
 928                fsl_sata_reg_t *reg;
 929
 930                /* first SATA controller */
 931                reg = (void *)CFG_SYS_MPC85xx_SATA1_ADDR;
 932                clrbits_le32(&reg->hcontrol, HCONTROL_ENTERPRISE_EN);
 933
 934                /* second SATA controller */
 935                reg = (void *)CFG_SYS_MPC85xx_SATA2_ADDR;
 936                clrbits_le32(&reg->hcontrol, HCONTROL_ENTERPRISE_EN);
 937        }
 938#endif
 939
 940        init_used_tlb_cams();
 941
 942        return 0;
 943}
 944
 945#ifdef CONFIG_ARCH_MISC_INIT
 946int arch_misc_init(void)
 947{
 948        if (IS_ENABLED(CONFIG_FSL_CAAM)) {
 949                struct udevice *dev;
 950                int ret;
 951
 952                ret = uclass_get_device_by_driver(UCLASS_MISC, DM_DRIVER_GET(caam_jr), &dev);
 953                if (ret)
 954                        printf("Failed to initialize caam_jr: %d\n", ret);
 955        }
 956
 957        return 0;
 958}
 959#endif
 960
 961void arch_preboot_os(void)
 962{
 963        u32 msr;
 964
 965        /*
 966         * We are changing interrupt offsets and are about to boot the OS so
 967         * we need to make sure we disable all async interrupts. EE is already
 968         * disabled by the time we get called.
 969         */
 970        msr = mfmsr();
 971        msr &= ~(MSR_ME|MSR_CE);
 972        mtmsr(msr);
 973}
 974
 975int cpu_secondary_init_r(void)
 976{
 977#ifdef CONFIG_QE
 978#ifdef CONFIG_U_QE
 979        uint qe_base = CONFIG_SYS_IMMR + 0x00140000; /* QE immr base */
 980#else
 981        uint qe_base = CONFIG_SYS_IMMR + 0x00080000; /* QE immr base */
 982#endif
 983
 984        qe_init(qe_base);
 985        qe_reset();
 986#endif
 987
 988        return 0;
 989}
 990
#ifdef CONFIG_BOARD_LATE_INIT
/*
 * board_late_init() - hook invoked late in the boot sequence.
 *
 * With chain-of-trust (secure boot) support compiled in, set up the
 * environment for the validation flow; otherwise this is a no-op.
 * Always returns 0 (success).
 */
int board_late_init(void)
{
#ifdef CONFIG_CHAIN_OF_TRUST
	/* Export the environment variables used by secure-boot validation. */
	fsl_setenv_chain_of_trust();
#endif

	return 0;
}
#endif
1001