uboot/arch/powerpc/cpu/mpc85xx/start.S
   1/* SPDX-License-Identifier: GPL-2.0+ */
   2/*
   3 * Copyright 2004, 2007-2012 Freescale Semiconductor, Inc.
   4 * Copyright (C) 2003  Motorola,Inc.
   5 */
   6
   7/* U-Boot Startup Code for Motorola 85xx PowerPC based Embedded Boards
   8 *
   9 * The processor starts at 0xfffffffc and the code is first executed in the
   10 * last 4K page (0xfffff000-0xffffffff) in flash/rom.
  11 *
  12 */
  13
  14#include <asm-offsets.h>
  15#include <config.h>
  16#include <mpc85xx.h>
  17#include <system-constants.h>
  18
  19#include <ppc_asm.tmpl>
  20#include <ppc_defs.h>
  21
  22#include <asm/cache.h>
  23#include <asm/mmu.h>
  24
  25#undef  MSR_KERNEL
  26#define MSR_KERNEL ( MSR_ME )   /* Machine Check */
  27
  28#define LAW_EN          0x80000000
  29
  30#if defined(CONFIG_NAND_SPL) || \
  31        (defined(CONFIG_SPL_BUILD) && CONFIG_IS_ENABLED(INIT_MINIMAL))
  32#define MINIMAL_SPL
  33#endif
  34
  35#if !defined(CONFIG_SPL) && !defined(CONFIG_SYS_RAMBOOT) && \
  36        !defined(CONFIG_NXP_ESBC) && !defined(CONFIG_SRIO_PCIE_BOOT_SLAVE)
  37#define NOR_BOOT
  38#endif
  39
  40/*
  41 * Set up GOT: Global Offset Table
  42 *
  43 * Use r12 to access the GOT
  44 */
  45        START_GOT
  46        GOT_ENTRY(_GOT2_TABLE_)
  47        GOT_ENTRY(_FIXUP_TABLE_)
  48
  49#ifndef MINIMAL_SPL
  50        GOT_ENTRY(_start_of_vectors)
  51        GOT_ENTRY(_end_of_vectors)
  52        GOT_ENTRY(transfer_to_handler)
  53#endif
  54
  55        GOT_ENTRY(__init_end)
  56        GOT_ENTRY(__bss_end)
  57        GOT_ENTRY(__bss_start)
  58        END_GOT
  59
  60#ifdef CONFIG_FSL_PREPBL_ESDHC_BOOT_SECTOR
  61#if !defined(CONFIG_SPL) || defined(CONFIG_SPL_BUILD)
  62
   63/* Maximum size of the image */
  64#ifdef CONFIG_SPL_BUILD
  65#define MAX_IMAGE_SIZE (CONFIG_SPL_MAX_SIZE - (CONFIG_FSL_PREPBL_ESDHC_BOOT_SECTOR_DATA * 512))
  66#else
  67#define MAX_IMAGE_SIZE CONFIG_SYS_L2_SIZE
  68#endif
  69
  70#if defined(CONFIG_SPL_BUILD) && CONFIG_SPL_MAX_SIZE < CONFIG_FSL_PREPBL_ESDHC_BOOT_SECTOR_DATA * 512
  71#error "CONFIG_SPL_MAX_SIZE is too small for CONFIG_FSL_PREPBL_ESDHC_BOOT_SECTOR_DATA"
  72#endif
  73
  74#if MAX_IMAGE_SIZE > CONFIG_SYS_L2_SIZE
  75#error "Image is too big"
  76#endif
  77
  78#define DIV_ROUND_UP(a, b) (((a) + (b) - 1) / (b))
  79#define ALIGN(x, a) (DIV_ROUND_UP(x, a) * (a))
  80
  81/* Definitions from C header file asm/immap_85xx.h */
  82
  83#define CFG_SYS_MPC85xx_L2_OFFSET               0x20000
  84
  85#define MPC85xx_L2CTL                           0x000
  86#define MPC85xx_L2CTL_L2E                       0x80000000
  87#define MPC85xx_L2CTL_L2SRAM_ENTIRE             0x00010000
  88
  89#define MPC85xx_L2SRBAR0                        0x100
  90
  91#define MPC85xx_L2ERRDIS                        0xe44
  92#define MPC85xx_L2ERRDIS_MBECC                  0x00000008
  93#define MPC85xx_L2ERRDIS_SBECC                  0x00000004
  94
  95/* Definitions from C header file fsl_esdhc.h */
  96
  97#define ESDHCCTL                                0x0002e40c
  98#define ESDHCCTL_SNOOP                          0x00000040
  99
 100/*
 101 * QorIQ pre-PBL eSDHC boot sector:
 102 * Instruct BootROM to configure L2 SRAM and eSDHC then load image
 103 * from SD card into L2 SRAM and finally jump to image entry point.
 104 */
 105        .section .bootsect, "a"
 106        .globl bootsect
 107
 108bootsect:
 109        .org 0x40 /* BOOT signature */
 110        .ascii "BOOT"
 111
 112        .org 0x48 /* Number of bytes to be copied, must be multiple of block size (512) */
 113        .long ALIGN(MAX_IMAGE_SIZE, 512)
 114
 115        .org 0x50 /* Source address from the beginning of boot sector in byte address format, must be multiple of block size (512) */
 116        .long (CONFIG_FSL_PREPBL_ESDHC_BOOT_SECTOR_START + CONFIG_FSL_PREPBL_ESDHC_BOOT_SECTOR_DATA) * 512
 117
 118        .org 0x58 /* Target address in the system's local memory address space */
 119        .long CONFIG_SYS_MONITOR_BASE
 120
 121        .org 0x60 /* Execution starting address */
 122        .long _start
 123
 124        .org 0x68 /* Number of configuration data pairs */
 125        .long DIV_ROUND_UP(.Lconf_pair_end - .Lconf_pair_start, 8)
 126
 127        .org 0x80 /* Start of configuration */
 128        .Lconf_pair_start:
 129
 130        .long CONFIG_SYS_CCSRBAR_DEFAULT + CFG_SYS_MPC85xx_L2_OFFSET + MPC85xx_L2SRBAR0 /* Address: L2 memory-mapped SRAM base addr 0 */
 131        .long CONFIG_SYS_INIT_L2_ADDR
 132
 133        .long CONFIG_SYS_CCSRBAR_DEFAULT + CFG_SYS_MPC85xx_L2_OFFSET + MPC85xx_L2ERRDIS /* Address: L2 cache error disable */
 134        .long MPC85xx_L2ERRDIS_MBECC | MPC85xx_L2ERRDIS_SBECC
 135
 136        .long CONFIG_SYS_CCSRBAR_DEFAULT + CFG_SYS_MPC85xx_L2_OFFSET + MPC85xx_L2CTL /* Address: L2 configuration 0 */
 137        .long MPC85xx_L2CTL_L2E | MPC85xx_L2CTL_L2SRAM_ENTIRE
 138
 139        .long CONFIG_SYS_CCSRBAR_DEFAULT + ESDHCCTL /* Address: eSDHC DMA control */
 140        .long ESDHCCTL_SNOOP
 141
 142        .long 0x40000001 /* Command: Delay in 8 CCB clocks */
 143        .long 256
 144
 145        .long 0x80000001 /* End of configuration */
 146        .Lconf_pair_end:
 147
 148        .org 0x1b8 /* Reserved for MBR/DBR */
 149        .org 0x200 /* End of boot sector */
 150
 151#endif
 152#endif
 153
 154/*
 155 * e500 Startup -- after reset only the last 4KB of the effective
 156 * address space is mapped in the MMU L2 TLB1 Entry0. The .bootpg
 157 * section is located at THIS LAST page and basically does three
 158 * things: clear some registers, set up exception tables and
  159 * add more TLB entries for 'larger spaces' (e.g. the boot rom) to
  160 * continue the boot procedure.
  161 *
 162 * Once the boot rom is mapped by TLB entries we can proceed
 163 * with normal startup.
 164 *
 165 */
 166
 167        .section .bootpg,"ax"
 168        .globl _start
 169
 170_start:
 171/* Enable debug exception */
 172        li      r1,MSR_DE
 173        mtmsr   r1
 174
 175        /*
 176         * If we got an ePAPR device tree pointer passed in as r3, we need that
 177         * later in cpu_init_early_f(). Save it to a safe register before we
 178         * clobber it so that we can fetch it from there later.
 179         */
 180        mr      r24, r3
 181
 182#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
 183        mfspr   r3,SPRN_SVR
 184        rlwinm  r3,r3,0,0xff
 185        li      r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV
 186        cmpw    r3,r4
 187        beq     1f
 188
 189#ifdef CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
 190        li      r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
 191        cmpw    r3,r4
 192        beq     1f
 193#endif
 194
 195        /* Not a supported revision affected by erratum */
 196        li      r27,0
 197        b       2f
 198
 1991:      li      r27,1   /* Remember for later that we have the erratum */
 200        /* Erratum says set bits 55:60 to 001001 */
 201        msync
 202        isync
 203        mfspr   r3,SPRN_HDBCR0
 204        li      r4,0x48
 205        rlwimi  r3,r4,0,0x1f8
 206        mtspr   SPRN_HDBCR0,r3
 207        isync
 2082:
 209#endif
 210#ifdef CONFIG_SYS_FSL_ERRATUM_A005125
 211        msync
 212        isync
 213        mfspr   r3, SPRN_HDBCR0
 214        oris    r3, r3, 0x0080
 215        mtspr   SPRN_HDBCR0, r3
 216#endif
 217
 218
 219#if defined(CONFIG_NXP_ESBC) && defined(CONFIG_E500MC) && \
 220        !defined(CONFIG_E6500)
  221        /* ISBC uses L2 as stack.
  222         * Disable L2 cache here so that u-boot can enable it later
  223         * as part of its normal flow
  224         */
 225
 226        /* Check if L2 is enabled */
 227        mfspr   r3, SPRN_L2CSR0
 228        lis     r2, L2CSR0_L2E@h
 229        ori     r2, r2, L2CSR0_L2E@l
 230        and.    r4, r3, r2
 231        beq     l2_disabled
 232
 233        mfspr r3, SPRN_L2CSR0
 234        /* Flush L2 cache */
 235        lis     r2,(L2CSR0_L2FL)@h
 236        ori     r2, r2, (L2CSR0_L2FL)@l
 237        or      r3, r2, r3
 238        sync
 239        isync
 240        mtspr   SPRN_L2CSR0,r3
 241        isync
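        /* Wait for the L2FL (flush) bits to clear before disabling the L2 */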
 2421:
 243        mfspr r3, SPRN_L2CSR0
 244        and. r1, r3, r2
 245        bne 1b
 246
 247        mfspr r3, SPRN_L2CSR0
 248        lis r2, L2CSR0_L2E@h
 249        ori r2, r2, L2CSR0_L2E@l
 250        andc r4, r3, r2
 251        sync
 252        isync
 253        mtspr SPRN_L2CSR0,r4
 254        isync
 255
 256l2_disabled:
 257#endif
 258
 259/* clear registers/arrays not reset by hardware */
 260
 261        /* L1 */
 262        li      r0,2
 263        mtspr   L1CSR0,r0       /* invalidate d-cache */
 264        mtspr   L1CSR1,r0       /* invalidate i-cache */
 265
 266        mfspr   r1,DBSR
 267        mtspr   DBSR,r1         /* Clear all valid bits */
 268
 269
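/*
 * Helper macros: program MAS0-MAS3 (plus MAS7 for the upper physical
 * address bits) and execute tlbwe to write, or invalidate, a single
 * entry in TLB1 or TLB0, using \scratch as a temporary register.
 */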
 270        .macro  create_tlb1_entry esel ts tsize epn wimg rpn perm phy_high scratch
 271        lis     \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@h
 272        ori     \scratch, \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@l
 273        mtspr   MAS0, \scratch
 274        lis     \scratch, FSL_BOOKE_MAS1(1, 1, 0, \ts, \tsize)@h
 275        ori     \scratch, \scratch, FSL_BOOKE_MAS1(1, 1, 0, \ts, \tsize)@l
 276        mtspr   MAS1, \scratch
 277        lis     \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
 278        ori     \scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
 279        mtspr   MAS2, \scratch
 280        lis     \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@h
 281        ori     \scratch, \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@l
 282        mtspr   MAS3, \scratch
 283        lis     \scratch, \phy_high@h
 284        ori     \scratch, \scratch, \phy_high@l
 285        mtspr   MAS7, \scratch
 286        isync
 287        msync
 288        tlbwe
 289        isync
 290        .endm
 291
 292        .macro  create_tlb0_entry esel ts tsize epn wimg rpn perm phy_high scratch
 293        lis     \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@h
 294        ori     \scratch, \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@l
 295        mtspr   MAS0, \scratch
 296        lis     \scratch, FSL_BOOKE_MAS1(1, 0, 0, \ts, \tsize)@h
 297        ori     \scratch, \scratch, FSL_BOOKE_MAS1(1, 0, 0, \ts, \tsize)@l
 298        mtspr   MAS1, \scratch
 299        lis     \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
 300        ori     \scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
 301        mtspr   MAS2, \scratch
 302        lis     \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@h
 303        ori     \scratch, \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@l
 304        mtspr   MAS3, \scratch
 305        lis     \scratch, \phy_high@h
 306        ori     \scratch, \scratch, \phy_high@l
 307        mtspr   MAS7, \scratch
 308        isync
 309        msync
 310        tlbwe
 311        isync
 312        .endm
 313
 314        .macro  delete_tlb1_entry esel scratch
 315        lis     \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@h
 316        ori     \scratch, \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@l
 317        mtspr   MAS0, \scratch
 318        li      \scratch, 0
 319        mtspr   MAS1, \scratch
 320        isync
 321        msync
 322        tlbwe
 323        isync
 324        .endm
 325
 326        .macro  delete_tlb0_entry esel epn wimg scratch
 327        lis     \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@h
 328        ori     \scratch, \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@l
 329        mtspr   MAS0, \scratch
 330        li      \scratch, 0
 331        mtspr   MAS1, \scratch
 332        lis     \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
 333        ori     \scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
 334        mtspr   MAS2, \scratch
 335        isync
 336        msync
 337        tlbwe
 338        isync
 339        .endm
 340
 341/* Interrupt vectors do not fit in minimal SPL. */
 342#if !defined(MINIMAL_SPL)
 343        /* Setup interrupt vectors */
 344        lis     r1,CONFIG_VAL(SYS_MONITOR_BASE)@h
 345        mtspr   IVPR,r1
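        /* IVPR supplies the upper bits of every vector address; the IVORn
         * registers set below supply the per-exception offsets. */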
 346
 347        li      r4,CriticalInput@l
 348        mtspr   IVOR0,r4        /* 0: Critical input */
 349        li      r4,MachineCheck@l
 350        mtspr   IVOR1,r4        /* 1: Machine check */
 351        li      r4,DataStorage@l
 352        mtspr   IVOR2,r4        /* 2: Data storage */
 353        li      r4,InstStorage@l
 354        mtspr   IVOR3,r4        /* 3: Instruction storage */
 355        li      r4,ExtInterrupt@l
 356        mtspr   IVOR4,r4        /* 4: External interrupt */
 357        li      r4,Alignment@l
 358        mtspr   IVOR5,r4        /* 5: Alignment */
 359        li      r4,ProgramCheck@l
 360        mtspr   IVOR6,r4        /* 6: Program check */
 361        li      r4,FPUnavailable@l
 362        mtspr   IVOR7,r4        /* 7: floating point unavailable */
 363        li      r4,SystemCall@l
 364        mtspr   IVOR8,r4        /* 8: System call */
  365        /* 9: Auxiliary processor unavailable (unsupported) */
 366        li      r4,Decrementer@l
 367        mtspr   IVOR10,r4       /* 10: Decrementer */
 368        li      r4,IntervalTimer@l
 369        mtspr   IVOR11,r4       /* 11: Interval timer */
 370        li      r4,WatchdogTimer@l
 371        mtspr   IVOR12,r4       /* 12: Watchdog timer */
 372        li      r4,DataTLBError@l
 373        mtspr   IVOR13,r4       /* 13: Data TLB error */
 374        li      r4,InstructionTLBError@l
 375        mtspr   IVOR14,r4       /* 14: Instruction TLB error */
 376        li      r4,DebugBreakpoint@l
 377        mtspr   IVOR15,r4       /* 15: Debug */
 378#endif
 379
 380        /* Clear and set up some registers. */
 381        li      r0,0x0000
 382        lis     r1,0xffff
 383        mtspr   DEC,r0                  /* prevent dec exceptions */
 384        mttbl   r0                      /* prevent fit & wdt exceptions */
 385        mttbu   r0
 386        mtspr   TSR,r1                  /* clear all timer exception status */
 387        mtspr   TCR,r0                  /* disable all */
 388        mtspr   ESR,r0                  /* clear exception syndrome register */
 389        mtspr   MCSR,r0                 /* machine check syndrome register */
 390        mtxer   r0                      /* clear integer exception register */
 391
 392#ifdef CONFIG_SYS_BOOK3E_HV
 393        mtspr   MAS8,r0                 /* make sure MAS8 is clear */
 394#endif
 395
 396        /* Enable Time Base and Select Time Base Clock */
 397        lis     r0,HID0_EMCP@h          /* Enable machine check */
 398#if defined(CONFIG_ENABLE_36BIT_PHYS)
 399        ori     r0,r0,HID0_ENMAS7@l     /* Enable MAS7 */
 400#endif
 401#ifndef CONFIG_E500MC
 402        ori     r0,r0,HID0_TBEN@l       /* Enable Timebase */
 403#endif
 404        mtspr   HID0,r0
 405
 406#if !defined(CONFIG_E500MC) && !defined(CONFIG_ARCH_QEMU_E500)
 407        li      r0,(HID1_ASTME|HID1_ABE)@l      /* Addr streaming & broadcast */
 408        mfspr   r3,PVR
 409        andi.   r3,r3, 0xff
 410        cmpwi   r3,0x50@l       /* if we are rev 5.0 or greater set MBDD */
 411        blt 1f
 412        /* Set MBDD bit also */
 413        ori r0, r0, HID1_MBDD@l
 4141:
 415        mtspr   HID1,r0
 416#endif
 417
 418#ifdef CONFIG_SYS_FSL_ERRATUM_CPU_A003999
 419        mfspr   r3,SPRN_HDBCR1
 420        oris    r3,r3,0x0100
 421        mtspr   SPRN_HDBCR1,r3
 422#endif
 423
 424        /* Enable Branch Prediction */
 425#if defined(CONFIG_BTB)
 426        lis     r0,BUCSR_ENABLE@h
 427        ori     r0,r0,BUCSR_ENABLE@l
 428        mtspr   SPRN_BUCSR,r0
 429#endif
 430
 431#if defined(CONFIG_SYS_INIT_DBCR)
 432        lis     r1,0xffff
 433        ori     r1,r1,0xffff
 434        mtspr   DBSR,r1                 /* Clear all status bits */
 435        lis     r0,CONFIG_SYS_INIT_DBCR@h       /* DBCR0[IDM] must be set */
 436        ori     r0,r0,CONFIG_SYS_INIT_DBCR@l
 437        mtspr   DBCR0,r0
 438#endif
 439
 440/*
 441 * Search for the TLB that covers the code we're executing, and shrink it
 442 * so that it covers only this 4K page.  That will ensure that any other
 443 * TLB we create won't interfere with it.  We assume that the TLB exists,
 444 * which is why we don't check the Valid bit of MAS1.  We also assume
 445 * it is in TLB1.
 446 *
 447 * This is necessary, for example, when booting from the on-chip ROM,
 448 * which (oddly) creates a single 4GB TLB that covers CCSR and DDR.
 449 */
 450        bl      nexti           /* Find our address */
 451nexti:  mflr    r1              /* R1 = our PC */
 452        li      r2, 0
 453        mtspr   MAS6, r2        /* Assume the current PID and AS are 0 */
 454        isync
 455        msync
 456        tlbsx   0, r1           /* This must succeed */
 457
 458        mfspr   r14, MAS0       /* Save ESEL for later */
 459        rlwinm  r14, r14, 16, 0xfff
 460
 461        /* Set the size of the TLB to 4KB */
 462        mfspr   r3, MAS1
 463        li      r2, 0xF80
 464        andc    r3, r3, r2      /* Clear the TSIZE bits */
 465        ori     r3, r3, MAS1_TSIZE(BOOKE_PAGESZ_4K)@l
 466        oris    r3, r3, MAS1_IPROT@h
 467        mtspr   MAS1, r3
 468
 469        /*
 470         * Set the base address of the TLB to our PC.  We assume that
 471         * virtual == physical.  We also assume that MAS2_EPN == MAS3_RPN.
 472         */
 473        lis     r3, MAS2_EPN@h
 474        ori     r3, r3, MAS2_EPN@l      /* R3 = MAS2_EPN */
 475
 476        and     r1, r1, r3      /* Our PC, rounded down to the nearest page */
 477
 478        mfspr   r2, MAS2
 479        andc    r2, r2, r3
 480        or      r2, r2, r1
 481#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
 482        cmpwi   r27,0
 483        beq     1f
 484        andi.   r15, r2, MAS2_I|MAS2_G /* save the old I/G for later */
 485        rlwinm  r2, r2, 0, ~MAS2_I
 486        ori     r2, r2, MAS2_G
 4871:
 488#endif
 489        mtspr   MAS2, r2        /* Set the EPN to our PC base address */
 490
 491        mfspr   r2, MAS3
 492        andc    r2, r2, r3
 493        or      r2, r2, r1
 494        mtspr   MAS3, r2        /* Set the RPN to our PC base address */
 495
 496        isync
 497        msync
 498        tlbwe
 499
 500/*
 501 * Clear out any other TLB entries that may exist, to avoid conflicts.
 502 * Our TLB entry is in r14.
 503 */
 504        li      r0, TLBIVAX_ALL | TLBIVAX_TLB0
 505        tlbivax 0, r0
 506        tlbsync
 507
 508        mfspr   r4, SPRN_TLB1CFG
 509        rlwinm  r4, r4, 0, TLBnCFG_NENTRY_MASK
 510
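        /* r3 = entry index, r4 = number of TLB1 entries: invalidate every
         * TLB1 entry except the one we are running from (index in r14). */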
 511        li      r3, 0
 512        mtspr   MAS1, r3
 5131:      cmpw    r3, r14
 514        rlwinm  r5, r3, 16, MAS0_ESEL_MSK
 515        addi    r3, r3, 1
 516        beq     2f              /* skip the entry we're executing from */
 517
 518        oris    r5, r5, MAS0_TLBSEL(1)@h
 519        mtspr   MAS0, r5
 520
 521        isync
 522        tlbwe
 523        isync
 524        msync
 525
 5262:      cmpw    r3, r4
 527        blt     1b
 528
 529#if defined(CONFIG_SYS_PPC_E500_DEBUG_TLB) && !defined(MINIMAL_SPL) && \
 530        !defined(CONFIG_NXP_ESBC)
  531/*
  532 * TLB entry for debugging in AS1.
  533 * Create a temporary TLB entry in AS0 to handle the debug exception,
  534 * because a debug exception clears the MSR, i.e. the address space
  535 * switches to 0, so a TLB entry in AS0 is required to handle a debug
  536 * exception raised while running in AS1.
  537 */
 538
 539#ifdef NOR_BOOT
  540/*
  541 * A TLB entry is created for IVPR + IVOR15 so that it maps to a valid
  542 * opcode address, because the flash's virtual address range
  543 * 0xff800000 - 0xffffffff lies outside of the 4K boot window.
  544 */
 545        create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \
 546                0, BOOKE_PAGESZ_4M, \
 547                CONFIG_VAL(SYS_MONITOR_BASE) & 0xffc00000,  MAS2_I|MAS2_G, \
 548                0xffc00000, MAS3_SX|MAS3_SW|MAS3_SR, \
 549                0, r6
 550
 551#else
  552/*
  553 * A TLB entry is created for IVPR + IVOR15 so that it maps to a valid
  554 * opcode address, because the TLB found at "nexti" has been resized to 4K.
  555 */
 556        create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \
 557                0, BOOKE_PAGESZ_256K, \
 558                CONFIG_VAL(SYS_MONITOR_BASE) & 0xfffc0000, MAS2_I, \
 559                CONFIG_VAL(SYS_MONITOR_BASE) & 0xfffc0000, MAS3_SX|MAS3_SW|MAS3_SR, \
 560                0, r6
 561#endif
 562#endif
 563
 564/*
 565 * Relocate CCSR, if necessary.  We relocate CCSR if (obviously) the default
 566 * location is not where we want it.  This typically happens on a 36-bit
 567 * system, where we want to move CCSR to near the top of 36-bit address space.
 568 *
 569 * To move CCSR, we create two temporary TLBs, one for the old location, and
 570 * another for the new location.  On CoreNet systems, we also need to create
 571 * a special, temporary LAW.
 572 *
 573 * As a general rule, TLB0 is used for short-term TLBs, and TLB1 is used for
 574 * long-term TLBs, so we use TLB0 here.
 575 */
 576#if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS)
 577
 578#if !defined(CONFIG_SYS_CCSRBAR_PHYS_HIGH) || !defined(CONFIG_SYS_CCSRBAR_PHYS_LOW)
  579#error "CONFIG_SYS_CCSRBAR_PHYS_HIGH and CONFIG_SYS_CCSRBAR_PHYS_LOW must be defined."
 580#endif
 581
 582create_ccsr_new_tlb:
 583        /*
 584         * Create a TLB for the new location of CCSR.  Register R8 is reserved
 585         * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR).
 586         */
 587        lis     r8, CONFIG_SYS_CCSRBAR@h
 588        ori     r8, r8, CONFIG_SYS_CCSRBAR@l
 589        lis     r9, (CONFIG_SYS_CCSRBAR + 0x1000)@h
 590        ori     r9, r9, (CONFIG_SYS_CCSRBAR + 0x1000)@l
 591        create_tlb0_entry 0, \
 592                0, BOOKE_PAGESZ_4K, \
 593                CONFIG_SYS_CCSRBAR, MAS2_I|MAS2_G, \
 594                CONFIG_SYS_CCSRBAR_PHYS_LOW, MAS3_SW|MAS3_SR, \
 595                CONFIG_SYS_CCSRBAR_PHYS_HIGH, r3
 596        /*
 597         * Create a TLB for the current location of CCSR.  Register R9 is reserved
 598         * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR + 0x1000).
 599         */
 600create_ccsr_old_tlb:
 601        create_tlb0_entry 1, \
 602                0, BOOKE_PAGESZ_4K, \
 603                CONFIG_SYS_CCSRBAR + 0x1000, MAS2_I|MAS2_G, \
 604                CONFIG_SYS_CCSRBAR_DEFAULT, MAS3_SW|MAS3_SR, \
 605                0, r3 /* The default CCSR address is always a 32-bit number */
 606
 607
 608        /*
 609         * We have a TLB for what we think is the current (old) CCSR.  Let's
 610         * verify that, otherwise we won't be able to move it.
 611         * CONFIG_SYS_CCSRBAR_DEFAULT is always a 32-bit number, so we only
 612         * need to compare the lower 32 bits of CCSRBAR on CoreNet systems.
 613         */
 614verify_old_ccsr:
 615        lis     r0, CONFIG_SYS_CCSRBAR_DEFAULT@h
 616        ori     r0, r0, CONFIG_SYS_CCSRBAR_DEFAULT@l
 617#ifdef CONFIG_FSL_CORENET
 618        lwz     r1, 4(r9)               /* CCSRBARL */
 619#else
 620        lwz     r1, 0(r9)               /* CCSRBAR, shifted right by 12 */
 621        slwi    r1, r1, 12
 622#endif
 623
 624        cmpl    0, r0, r1
 625
 626        /*
 627         * If the value we read from CCSRBARL is not what we expect, then
 628         * enter an infinite loop.  This will at least allow a debugger to
 629         * halt execution and examine TLBs, etc.  There's no point in going
 630         * on.
 631         */
 632infinite_debug_loop:
 633        bne     infinite_debug_loop
 634
 635#ifdef CONFIG_FSL_CORENET
 636
 637#define CCSR_LAWBARH0   (CONFIG_SYS_CCSRBAR + 0x1000)
 638#define LAW_SIZE_4K     0xb
 639#define CCSRBAR_LAWAR   (LAW_EN | (0x1e << 20) | LAW_SIZE_4K)
 640#define CCSRAR_C        0x80000000      /* Commit */
 641
 642create_temp_law:
 643        /*
 644         * On CoreNet systems, we create the temporary LAW using a special LAW
 645         * target ID of 0x1e.  LAWBARH is at offset 0xc00 in CCSR.
 646         */
 647        lis     r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
 648        ori     r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
 649        lis     r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
 650        ori     r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
 651        lis     r2, CCSRBAR_LAWAR@h
 652        ori     r2, r2, CCSRBAR_LAWAR@l
 653
 654        stw     r0, 0xc00(r9)   /* LAWBARH0 */
 655        stw     r1, 0xc04(r9)   /* LAWBARL0 */
 656        sync
 657        stw     r2, 0xc08(r9)   /* LAWAR0 */
 658
 659        /*
 660         * Read back from LAWAR to ensure the update is complete.  e500mc
 661         * cores also require an isync.
 662         */
 663        lwz     r0, 0xc08(r9)   /* LAWAR0 */
 664        isync
 665
 666        /*
 667         * Read the current CCSRBARH and CCSRBARL using load word instructions.
 668         * Follow this with an isync instruction. This forces any outstanding
 669         * accesses to configuration space to completion.
 670         */
 671read_old_ccsrbar:
 672        lwz     r0, 0(r9)       /* CCSRBARH */
 673        lwz     r0, 4(r9)       /* CCSRBARL */
 674        isync
 675
 676        /*
 677         * Write the new values for CCSRBARH and CCSRBARL to their old
 678         * locations.  The CCSRBARH has a shadow register. When the CCSRBARH
 679         * has a new value written it loads a CCSRBARH shadow register. When
 680         * the CCSRBARL is written, the CCSRBARH shadow register contents
 681         * along with the CCSRBARL value are loaded into the CCSRBARH and
 682         * CCSRBARL registers, respectively.  Follow this with a sync
 683         * instruction.
 684         */
 685write_new_ccsrbar:
 686        lis     r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
 687        ori     r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
 688        lis     r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
 689        ori     r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
 690        lis     r2, CCSRAR_C@h
 691        ori     r2, r2, CCSRAR_C@l
 692
 693        stw     r0, 0(r9)       /* Write to CCSRBARH */
 694        sync                    /* Make sure we write to CCSRBARH first */
 695        stw     r1, 4(r9)       /* Write to CCSRBARL */
 696        sync
 697
 698        /*
 699         * Write a 1 to the commit bit (C) of CCSRAR at the old location.
 700         * Follow this with a sync instruction.
 701         */
 702        stw     r2, 8(r9)
 703        sync
 704
 705        /* Delete the temporary LAW */
 706delete_temp_law:
 707        li      r1, 0
 708        stw     r1, 0xc08(r8)
 709        sync
 710        stw     r1, 0xc00(r8)
 711        stw     r1, 0xc04(r8)
 712        sync
 713
 714#else /* #ifdef CONFIG_FSL_CORENET */
 715
 716write_new_ccsrbar:
 717        /*
 718         * Read the current value of CCSRBAR using a load word instruction
 719         * followed by an isync. This forces all accesses to configuration
 720         * space to complete.
 721         */
 722        sync
 723        lwz     r0, 0(r9)
 724        isync
 725
 726/* CONFIG_SYS_CCSRBAR_PHYS right shifted by 12 */
 727#define CCSRBAR_PHYS_RS12 ((CONFIG_SYS_CCSRBAR_PHYS_HIGH << 20) | \
 728                           (CONFIG_SYS_CCSRBAR_PHYS_LOW >> 12))
 729
 730        /* Write the new value to CCSRBAR. */
 731        lis     r0, CCSRBAR_PHYS_RS12@h
 732        ori     r0, r0, CCSRBAR_PHYS_RS12@l
 733        stw     r0, 0(r9)
 734        sync
 735
 736        /*
 737         * The manual says to perform a load of an address that does not
 738         * access configuration space or the on-chip SRAM using an existing TLB,
 739         * but that doesn't appear to be necessary.  We will do the isync,
 740         * though.
 741         */
 742        isync
 743
 744        /*
 745         * Read the contents of CCSRBAR from its new location, followed by
 746         * another isync.
 747         */
 748        lwz     r0, 0(r8)
 749        isync
 750
 751#endif  /* #ifdef CONFIG_FSL_CORENET */
 752
 753        /* Delete the temporary TLBs */
 754delete_temp_tlbs:
 755        delete_tlb0_entry 0, CONFIG_SYS_CCSRBAR, MAS2_I|MAS2_G, r3
 756        delete_tlb0_entry 1, CONFIG_SYS_CCSRBAR + 0x1000, MAS2_I|MAS2_G, r3
 757
 758#endif /* #if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS) */
 759
 760#if defined(CONFIG_SYS_FSL_QORIQ_CHASSIS2) && defined(CONFIG_E6500)
 761create_ccsr_l2_tlb:
 762        /*
 763         * Create a TLB for the MMR location of CCSR
 764         * to access L2CSR0 register
 765         */
 766        create_tlb0_entry 0, \
 767                0, BOOKE_PAGESZ_4K, \
 768                CONFIG_SYS_CCSRBAR + 0xC20000, MAS2_I|MAS2_G, \
 769                CONFIG_SYS_CCSRBAR_PHYS_LOW + 0xC20000, MAS3_SW|MAS3_SR, \
 770                CONFIG_SYS_CCSRBAR_PHYS_HIGH, r3
 771
 772enable_l2_cluster_l2:
 773        /* enable L2 cache */
 774        lis     r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@h
 775        ori     r3, r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@l
 776        li      r4, 33  /* stash id */
 777        stw     r4, 4(r3)
 778        lis     r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@h
 779        ori     r4, r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@l
 780        sync
 781        stw     r4, 0(r3)       /* invalidate L2 */
 782        /* Poll till the bits are cleared */
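        /* The twi/isync pair after each load forces the load to complete
         * before execution continues. */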
 7831:      sync
 784        lwz     r0, 0(r3)
 785        twi     0, r0, 0
 786        isync
 787        and.    r1, r0, r4
 788        bne     1b
 789
 790        /* L2PE must be set before L2 cache is enabled */
 791        lis     r4, (L2CSR0_L2PE)@h
 792        ori     r4, r4, (L2CSR0_L2PE)@l
 793        sync
 794        stw     r4, 0(r3)       /* enable L2 parity/ECC error checking */
 795        /* Poll till the bit is set */
 7961:      sync
 797        lwz     r0, 0(r3)
 798        twi     0, r0, 0
 799        isync
 800        and.    r1, r0, r4
 801        beq     1b
 802
 803        lis     r4, (L2CSR0_L2E|L2CSR0_L2PE)@h
 804        ori     r4, r4, (L2CSR0_L2REP_MODE)@l
 805        sync
 806        stw     r4, 0(r3)       /* enable L2 */
 807        /* Poll till the bit is set */
 8081:      sync
 809        lwz     r0, 0(r3)
 810        twi     0, r0, 0
 811        isync
 812        and.    r1, r0, r4
 813        beq     1b
 814
 815delete_ccsr_l2_tlb:
 816        delete_tlb0_entry 0, CONFIG_SYS_CCSRBAR + 0xC20000, MAS2_I|MAS2_G, r3
 817#endif
 818
 819        /*
 820         * Enable the L1. On e6500, this has to be done
 821         * after the L2 is up.
 822         */
 823
 824#ifdef CONFIG_SYS_CACHE_STASHING
 825        /* set stash id to (coreID) * 2 + 32 + L1 CT (0) */
 826        li      r2,(32 + 0)
 827        mtspr   L1CSR2,r2
 828#endif
 829
 830        /* Enable/invalidate the I-Cache */
 831        lis     r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
 832        ori     r2,r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
 833        mtspr   SPRN_L1CSR1,r2
 8341:
 835        mfspr   r3,SPRN_L1CSR1
 836        and.    r1,r3,r2
 837        bne     1b
 838
 839        lis     r3,(L1CSR1_CPE|L1CSR1_ICE)@h
 840        ori     r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
 841        mtspr   SPRN_L1CSR1,r3
 842        isync
 8432:
 844        mfspr   r3,SPRN_L1CSR1
 845        andi.   r1,r3,L1CSR1_ICE@l
 846        beq     2b
 847
 848        /* Enable/invalidate the D-Cache */
 849        lis     r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@h
 850        ori     r2,r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@l
 851        mtspr   SPRN_L1CSR0,r2
 8521:
 853        mfspr   r3,SPRN_L1CSR0
 854        and.    r1,r3,r2
 855        bne     1b
 856
 857        lis     r3,(L1CSR0_CPE|L1CSR0_DCE)@h
 858        ori     r3,r3,(L1CSR0_CPE|L1CSR0_DCE)@l
 859        mtspr   SPRN_L1CSR0,r3
 860        isync
 8612:
 862        mfspr   r3,SPRN_L1CSR0
 863        andi.   r1,r3,L1CSR0_DCE@l
 864        beq     2b
 865#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
 866#define DCSR_LAWBARH0   (CONFIG_SYS_CCSRBAR + 0x1000)
 867#define LAW_SIZE_1M     0x13
 868#define DCSRBAR_LAWAR   (LAW_EN | (0x1d << 20) | LAW_SIZE_1M)
 869
 870        cmpwi   r27,0
 871        beq     9f
 872
 873        /*
 874         * Create a TLB entry for CCSR
 875         *
  876         * We're executing out of the TLB1 entry indexed by r14, and that's the only
 877         * TLB entry that exists.  To allocate some TLB entries for our
 878         * own use, flip a bit high enough that we won't flip it again
 879         * via incrementing.
 880         */
 881
 882        xori    r8, r14, 32
 883        lis     r0, MAS0_TLBSEL(1)@h
 884        rlwimi  r0, r8, 16, MAS0_ESEL_MSK
 885        lis     r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@h
 886        ori     r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@l
 887        lis     r7, CONFIG_SYS_CCSRBAR@h
 888        ori     r7, r7, CONFIG_SYS_CCSRBAR@l
 889        ori     r2, r7, MAS2_I|MAS2_G
 890        lis     r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@h
 891        ori     r3, r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@l
 892        lis     r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
 893        ori     r4, r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
 894        mtspr   MAS0, r0
 895        mtspr   MAS1, r1
 896        mtspr   MAS2, r2
 897        mtspr   MAS3, r3
 898        mtspr   MAS7, r4
 899        isync
 900        tlbwe
 901        isync
 902        msync
 903
 904        /* Map DCSR temporarily to physical address zero */
 905        li      r0, 0
 906        lis     r3, DCSRBAR_LAWAR@h
 907        ori     r3, r3, DCSRBAR_LAWAR@l
 908
 909        stw     r0, 0xc00(r7)   /* LAWBARH0 */
 910        stw     r0, 0xc04(r7)   /* LAWBARL0 */
 911        sync
 912        stw     r3, 0xc08(r7)   /* LAWAR0 */
 913
 914        /* Read back from LAWAR to ensure the update is complete. */
 915        lwz     r3, 0xc08(r7)   /* LAWAR0 */
 916        isync
 917
 918        /* Create a TLB entry for DCSR at zero */
 919
 920        addi    r9, r8, 1
 921        lis     r0, MAS0_TLBSEL(1)@h
 922        rlwimi  r0, r9, 16, MAS0_ESEL_MSK
 923        lis     r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@h
 924        ori     r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@l
 925        li      r6, 0   /* DCSR effective address */
 926        ori     r2, r6, MAS2_I|MAS2_G
 927        li      r3, MAS3_SW|MAS3_SR
 928        li      r4, 0
 929        mtspr   MAS0, r0
 930        mtspr   MAS1, r1
 931        mtspr   MAS2, r2
 932        mtspr   MAS3, r3
 933        mtspr   MAS7, r4
 934        isync
 935        tlbwe
 936        isync
 937        msync
 938
 939        /* enable the timebase */
 940#define CTBENR  0xe2084
 941        li      r3, 1
 942        addis   r4, r7, CTBENR@ha
 943        stw     r3, CTBENR@l(r4)
 944        lwz     r3, CTBENR@l(r4)
 945        twi     0,r3,0
 946        isync
 947
 948        .macro  erratum_set_ccsr offset value
 949        addis   r3, r7, \offset@ha
 950        lis     r4, \value@h
 951        addi    r3, r3, \offset@l
 952        ori     r4, r4, \value@l
 953        bl      erratum_set_value
 954        .endm
 955
 956        .macro  erratum_set_dcsr offset value
 957        addis   r3, r6, \offset@ha
 958        lis     r4, \value@h
 959        addi    r3, r3, \offset@l
 960        ori     r4, r4, \value@l
 961        bl      erratum_set_value
 962        .endm
 963
 964        erratum_set_dcsr 0xb0e08 0xe0201800
 965        erratum_set_dcsr 0xb0e18 0xe0201800
 966        erratum_set_dcsr 0xb0e38 0xe0400000
 967        erratum_set_dcsr 0xb0008 0x00900000
 968        erratum_set_dcsr 0xb0e40 0xe00a0000
 969        erratum_set_ccsr 0x18600 CFG_SYS_FSL_CORENET_SNOOPVEC_COREONLY
 970#ifdef  CONFIG_RAMBOOT_PBL
 971        erratum_set_ccsr 0x10f00 0x495e5000
 972#else
 973        erratum_set_ccsr 0x10f00 0x415e5000
 974#endif
 975        erratum_set_ccsr 0x11f00 0x415e5000
 976
 977        /* Make temp mapping uncacheable again, if it was initially */
 978        bl      2f
 9792:      mflr    r3
 980        tlbsx   0, r3
 981        mfspr   r4, MAS2
 982        rlwimi  r4, r15, 0, MAS2_I
 983        rlwimi  r4, r15, 0, MAS2_G
 984        mtspr   MAS2, r4
 985        isync
 986        tlbwe
 987        isync
 988        msync
 989
 990        /* Clear the cache */
 991        lis     r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
 992        ori     r3,r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
 993        sync
 994        isync
 995        mtspr   SPRN_L1CSR1,r3
 996        isync
 9972:      sync
 998        mfspr   r4,SPRN_L1CSR1
 999        and.    r4,r4,r3
1000        bne     2b
1001
1002        lis     r3,(L1CSR1_CPE|L1CSR1_ICE)@h
1003        ori     r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
1004        sync
1005        isync
1006        mtspr   SPRN_L1CSR1,r3
1007        isync
10082:      sync
1009        mfspr   r4,SPRN_L1CSR1
1010        and.    r4,r4,r3
1011        beq     2b
1012
1013        /* Remove temporary mappings */
1014        lis     r0, MAS0_TLBSEL(1)@h
1015        rlwimi  r0, r9, 16, MAS0_ESEL_MSK
1016        li      r3, 0
1017        mtspr   MAS0, r0
1018        mtspr   MAS1, r3
1019        isync
1020        tlbwe
1021        isync
1022        msync
1023
1024        li      r3, 0
1025        stw     r3, 0xc08(r7)   /* LAWAR0 */
1026        lwz     r3, 0xc08(r7)
1027        isync
1028
1029        lis     r0, MAS0_TLBSEL(1)@h
1030        rlwimi  r0, r8, 16, MAS0_ESEL_MSK
1031        li      r3, 0
1032        mtspr   MAS0, r0
1033        mtspr   MAS1, r3
1034        isync
1035        tlbwe
1036        isync
1037        msync
1038
1039        b       9f
1040
1041        /* r3 = addr, r4 = value, clobbers r5, r11, r12 */
1042erratum_set_value:
1043        /* Lock two cache lines into I-Cache */
1044        sync
1045        mfspr   r11, SPRN_L1CSR1
1046        rlwinm  r11, r11, 0, ~L1CSR1_ICUL
1047        sync
1048        isync
1049        mtspr   SPRN_L1CSR1, r11
1050        isync
1051
1052        mflr    r12
1053        bl      5f
10545:      mflr    r5
1055        addi    r5, r5, 2f - 5b
1056        icbtls  0, 0, r5
1057        addi    r5, r5, 64
1058
1059        sync
1060        mfspr   r11, SPRN_L1CSR1
10613:      andi.   r11, r11, L1CSR1_ICUL
1062        bne     3b
1063
1064        icbtls  0, 0, r5
1065        addi    r5, r5, 64
1066
1067        sync
1068        mfspr   r11, SPRN_L1CSR1
10693:      andi.   r11, r11, L1CSR1_ICUL
1070        bne     3b
1071
1072        b       2f
1073        .align  6
1074        /* Inside a locked cacheline, wait a while, write, then wait a while */
10752:      sync
1076
1077        mfspr   r5, SPRN_TBRL
1078        addis   r11, r5, 0x10000@h /* wait 65536 timebase ticks */
10794:      mfspr   r5, SPRN_TBRL
1080        subf.   r5, r5, r11
1081        bgt     4b
1082
1083        stw     r4, 0(r3)
1084
1085        mfspr   r5, SPRN_TBRL
1086        addis   r11, r5, 0x10000@h /* wait 65536 timebase ticks */
10874:      mfspr   r5, SPRN_TBRL
1088        subf.   r5, r5, r11
1089        bgt     4b
1090
1091        sync
1092
1093        /*
1094         * Fill out the rest of this cache line and the next with nops,
1095         * to ensure that nothing outside the locked area will be
1096         * fetched due to a branch.
1097         */
1098        .rept 19
1099        nop
1100        .endr
1101
1102        sync
1103        mfspr   r11, SPRN_L1CSR1
1104        rlwinm  r11, r11, 0, ~L1CSR1_ICUL
1105        sync
1106        isync
1107        mtspr   SPRN_L1CSR1, r11
1108        isync
1109
1110        mtlr    r12
1111        blr
1112
11139:
1114#endif
1115
1116create_init_ram_area:
1117        lis     r6,FSL_BOOKE_MAS0(1, 15, 0)@h
1118        ori     r6,r6,FSL_BOOKE_MAS0(1, 15, 0)@l
1119
1120#ifdef NOR_BOOT
1121        /* create a temp mapping in AS=1 to the 4M boot window */
1122        create_tlb1_entry 15, \
1123                1, BOOKE_PAGESZ_4M, \
1124                CONFIG_VAL(SYS_MONITOR_BASE) & 0xffc00000, MAS2_I|MAS2_G, \
1125                0xffc00000, MAS3_SX|MAS3_SW|MAS3_SR, \
1126                0, r6
1127
1128#elif !defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_NXP_ESBC)
 1129        /* create a temp mapping in AS=1 for the flash mapping
 1130         * created by the PBL for the ISBC code
 1131         */
1132        create_tlb1_entry 15, \
1133                1, BOOKE_PAGESZ_1M, \
1134                CONFIG_VAL(SYS_MONITOR_BASE) & 0xfff00000, MAS2_I|MAS2_G, \
1135                CONFIG_SYS_PBI_FLASH_WINDOW & 0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \
1136                0, r6
1137
 1138/*
 1139 * For targets without CONFIG_SPL (e.g. P3, P5), and for targets with
 1140 * CONFIG_SPL (e.g. T1, T2, T4) only when building u-boot-spl,
 1141 * i.e. CONFIG_SPL_BUILD
 1142 */
1143#elif defined(CONFIG_RAMBOOT_PBL) && defined(CONFIG_NXP_ESBC) && \
1144        (!defined(CONFIG_SPL) || defined(CONFIG_SPL_BUILD))
 1145        /* create a temp mapping in AS=1 that maps CONFIG_VAL(SYS_MONITOR_BASE)
 1146         * to the L3 address configured by the PBL for the ISBC code
 1147         */
1148        create_tlb1_entry 15, \
1149                1, BOOKE_PAGESZ_1M, \
1150                CONFIG_VAL(SYS_MONITOR_BASE) & 0xfff00000, MAS2_I|MAS2_G, \
1151                CONFIG_SYS_INIT_L3_ADDR & 0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \
1152                0, r6
1153
1154#else
 1155        /*
 1156         * create a temp mapping in AS=1 to the 1M CONFIG_VAL(SYS_MONITOR_BASE) space;
 1157         * the main image has been relocated to CONFIG_VAL(SYS_MONITOR_BASE) by the second stage.
 1158         */
1159        create_tlb1_entry 15, \
1160                1, BOOKE_PAGESZ_1M, \
1161                CONFIG_VAL(SYS_MONITOR_BASE) & 0xfff00000, MAS2_I|MAS2_G, \
1162                CONFIG_VAL(SYS_MONITOR_BASE) & 0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \
1163                0, r6
1164#endif
1165
1166        /* create a temp mapping in AS=1 to the stack */
1167#if defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW) && \
1168    defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH)
1169        create_tlb1_entry 14, \
1170                1, BOOKE_PAGESZ_16K, \
1171                CONFIG_SYS_INIT_RAM_ADDR, 0, \
1172                CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW, MAS3_SX|MAS3_SW|MAS3_SR, \
1173                CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH, r6
1174
1175#else
1176        create_tlb1_entry 14, \
1177                1, BOOKE_PAGESZ_16K, \
1178                CONFIG_SYS_INIT_RAM_ADDR, 0, \
1179                CONFIG_SYS_INIT_RAM_ADDR, MAS3_SX|MAS3_SW|MAS3_SR, \
1180                0, r6
1181#endif
1182
1183        lis     r6,MSR_IS|MSR_DS|MSR_DE@h
1184        ori     r6,r6,MSR_IS|MSR_DS|MSR_DE@l
1185        lis     r7,switch_as@h
1186        ori     r7,r7,switch_as@l
1187
1188        mtspr   SPRN_SRR0,r7
1189        mtspr   SPRN_SRR1,r6
1190        rfi
1191
1192switch_as:
1193/* L1 DCache is used for initial RAM */
1194
1195        /* Allocate Initial RAM in data cache.
1196         */
1197        lis     r3,CONFIG_SYS_INIT_RAM_ADDR@h
1198        ori     r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
1199        mfspr   r2, L1CFG0
1200        andi.   r2, r2, 0x1ff
1201        /* cache size * 1024 / (2 * L1 line size) */
1202        slwi    r2, r2, (10 - 1 - L1_CACHE_SHIFT)
1203        mtctr   r2
1204        li      r0,0
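        /* dcbz establishes each line in the D-cache without reading memory;
         * dcbtls then locks the line so it behaves as SRAM with no backing
         * store behind it. */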
12051:
1206        dcbz    r0,r3
 1207#ifdef CONFIG_E6500     /* Lock/unlock L2 cache along with L1 */
1208        dcbtls  2, r0, r3
1209        dcbtls  0, r0, r3
1210#else
1211        dcbtls  0, r0, r3
1212#endif
1213        addi    r3,r3,CONFIG_SYS_CACHELINE_SIZE
1214        bdnz    1b
1215
 1216        /* Jump out of the last 4K page and continue to the 'normal' start */
1217#if defined(CONFIG_SYS_RAMBOOT) || defined(CONFIG_SPL)
1218        /* We assume that we're already running at the address we're linked at */
1219        b       _start_cont
1220#else
1221        /* Calculate absolute address in FLASH and jump there           */
1222        /*--------------------------------------------------------------*/
1223        lis     r3,_start_cont@h
1224        ori     r3,r3,_start_cont@l
1225        mtlr    r3
1226        blr
1227#endif
1228
1229        .text
1230        .globl  _start_cont
1231_start_cont:
 1232        /* Set up the stack in initial RAM, which could be L2-as-SRAM or L1 dcache */
1233        lis     r3,(CONFIG_SYS_INIT_RAM_ADDR)@h
1234        ori     r3,r3,((CONFIG_SYS_INIT_SP_OFFSET-16)&~0xf)@l /* Align to 16 */
1235
1236#if CONFIG_VAL(SYS_MALLOC_F_LEN)
1237#if CONFIG_VAL(SYS_MALLOC_F_LEN) + GENERATED_GBL_DATA_SIZE > CONFIG_SYS_INIT_RAM_SIZE
1238#error "SYS_MALLOC_F_LEN too large to fit into initial RAM."
1239#endif
1240
 1241        /* Leave 16+ bytes for back chain termination and NULL return address */
1242        subi    r3,r3,((CONFIG_VAL(SYS_MALLOC_F_LEN)+16+15)&~0xf)
1243#endif
1244
1245        /* End of RAM */
1246        lis     r4,(CONFIG_SYS_INIT_RAM_ADDR)@h
1247        ori     r4,r4,(CONFIG_SYS_INIT_RAM_SIZE)@l
1248
1249        li      r0,0
1250
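        /* Zero the initial RAM area downwards, from its end (r4) to the new
         * stack location (r3). */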
12511:      subi    r4,r4,4
1252        stw     r0,0(r4)
1253        cmplw   r4,r3
1254        bne     1b
1255
1256#if CONFIG_VAL(SYS_MALLOC_F_LEN)
1257        lis     r4,SYS_INIT_SP_ADDR@h
1258        ori     r4,r4,SYS_INIT_SP_ADDR@l
1259
1260        addi    r3,r3,16        /* Pre-relocation malloc area */
1261        stw     r3,GD_MALLOC_BASE(r4)
1262        subi    r3,r3,16
1263#endif
1264        li      r0,0
1265        stw     r0,0(r3)        /* Terminate Back Chain */
1266        stw     r0,+4(r3)       /* NULL return address. */
1267        mr      r1,r3           /* Transfer to SP(r1) */
1268
1269        GET_GOT
1270        /* Needed for -msingle-pic-base */
1271        bl      _GLOBAL_OFFSET_TABLE_@local-4
1272        mflr    r30
1273
1274        /* Pass our potential ePAPR device tree pointer to cpu_init_early_f */
1275        mr      r3, r24
1276
1277        bl      cpu_init_early_f
1278
1279        /* switch back to AS = 0 */
1280        lis     r3,(MSR_CE|MSR_ME|MSR_DE)@h
1281        ori     r3,r3,(MSR_CE|MSR_ME|MSR_DE)@l
1282        mtmsr   r3
1283        isync
1284
1285        bl      cpu_init_f      /* return boot_flag for calling board_init_f */
1286        bl      board_init_f
1287        isync
1288
1289        /* NOTREACHED - board_init_f() does not return */
1290
1291#ifndef MINIMAL_SPL
1292        .globl  _start_of_vectors
1293_start_of_vectors:
1294
1295/* Critical input. */
1296        CRIT_EXCEPTION(0x0100, CriticalInput, CritcalInputException)
1297
1298/* Machine check */
1299        MCK_EXCEPTION(0x200, MachineCheck, MachineCheckException)
1300
1301/* Data Storage exception. */
1302        STD_EXCEPTION(0x0300, DataStorage, UnknownException)
1303
1304/* Instruction Storage exception. */
1305        STD_EXCEPTION(0x0400, InstStorage, UnknownException)
1306
1307/* External Interrupt exception. */
1308        STD_EXCEPTION(0x0500, ExtInterrupt, ExtIntException)
1309
1310/* Alignment exception. */
1311Alignment:
1312        EXCEPTION_PROLOG(SRR0, SRR1)
1313        mfspr   r4,DAR
1314        stw     r4,_DAR(r21)
1315        mfspr   r5,DSISR
1316        stw     r5,_DSISR(r21)
1317        addi    r3,r1,STACK_FRAME_OVERHEAD
1318        EXC_XFER_TEMPLATE(0x600, Alignment, AlignmentException,
1319                MSR_KERNEL, COPY_EE)
1320
1321/* Program check exception */
1322ProgramCheck:
1323        EXCEPTION_PROLOG(SRR0, SRR1)
1324        addi    r3,r1,STACK_FRAME_OVERHEAD
1325        EXC_XFER_TEMPLATE(0x700, ProgramCheck, ProgramCheckException,
1326                MSR_KERNEL, COPY_EE)
1327
 1328        /* No FPU on MPC85xx.  This exception is not supposed to happen.
 1329         */
1330        STD_EXCEPTION(0x0800, FPUnavailable, UnknownException)
1331        STD_EXCEPTION(0x0900, SystemCall, UnknownException)
1332        STD_EXCEPTION(0x0a00, Decrementer, timer_interrupt)
1333        STD_EXCEPTION(0x0b00, IntervalTimer, UnknownException)
1334        STD_EXCEPTION(0x0c00, WatchdogTimer, UnknownException)
1335
1336        STD_EXCEPTION(0x0d00, DataTLBError, UnknownException)
1337        STD_EXCEPTION(0x0e00, InstructionTLBError, UnknownException)
1338
 1339        CRIT_EXCEPTION(0x0f00, DebugBreakpoint, DebugException)
1340
1341        .globl  _end_of_vectors
1342_end_of_vectors:
1343
1344
1345        . = . + (0x100 - ( . & 0xff ))  /* align for debug */
1346
1347/*
1348 * This code finishes saving the registers to the exception frame
1349 * and jumps to the appropriate handler for the exception.
1350 * Register r21 is pointer into trap frame, r1 has new stack pointer.
1351 * r23 is the address of the handler.
1352 */
1353        .globl  transfer_to_handler
1354transfer_to_handler:
1355        SAVE_GPR(7, r21)
1356        SAVE_4GPRS(8, r21)
1357        SAVE_8GPRS(12, r21)
1358        SAVE_8GPRS(24, r21)
1359
1360        li      r22,0
1361        stw     r22,RESULT(r21)
1362        mtspr   SPRG2,r22               /* r1 is now kernel sp */
1363
1364        mtctr   r23                     /* virtual address of handler */
1365        mtmsr   r20
1366        bctrl
1367
1368int_return:
1369        mfmsr   r28             /* Disable interrupts */
1370        li      r4,0
1371        ori     r4,r4,MSR_EE
1372        andc    r28,r28,r4
1373        SYNC                    /* Some chip revs need this... */
1374        mtmsr   r28
1375        SYNC
1376        lwz     r2,_CTR(r1)
1377        lwz     r0,_LINK(r1)
1378        mtctr   r2
1379        mtlr    r0
1380        lwz     r2,_XER(r1)
1381        lwz     r0,_CCR(r1)
1382        mtspr   XER,r2
1383        mtcrf   0xFF,r0
1384        REST_10GPRS(3, r1)
1385        REST_10GPRS(13, r1)
1386        REST_8GPRS(23, r1)
1387        REST_GPR(31, r1)
1388        lwz     r2,_NIP(r1)     /* Restore environment */
1389        lwz     r0,_MSR(r1)
1390        mtspr   SRR0,r2
1391        mtspr   SRR1,r0
1392        lwz     r0,GPR0(r1)
1393        lwz     r2,GPR2(r1)
1394        lwz     r1,GPR1(r1)
1395        SYNC
1396        rfi
1397
 1398/* Cache functions.
 1399 */
1400.globl flush_icache
1401flush_icache:
1402.globl invalidate_icache
1403invalidate_icache:
1404        mfspr   r0,L1CSR1
1405        ori     r0,r0,L1CSR1_ICFI
1406        msync
1407        isync
1408        mtspr   L1CSR1,r0
1409        isync
1410        blr                             /* entire I cache */
1411
1412.globl invalidate_dcache
1413invalidate_dcache:
1414        mfspr   r0,L1CSR0
1415        ori     r0,r0,L1CSR0_DCFI
1416        msync
1417        isync
1418        mtspr   L1CSR0,r0
1419        isync
1420        blr
1421
1422        .globl  icache_enable
1423icache_enable:
1424        mflr    r8
1425        bl      invalidate_icache
1426        mtlr    r8
1427        isync
1428        mfspr   r4,L1CSR1
1429        ori     r4,r4,(L1CSR1_CPE | L1CSR1_ICE)@l
1430        oris    r4,r4,(L1CSR1_CPE | L1CSR1_ICE)@h
1431        mtspr   L1CSR1,r4
1432        isync
1433        blr
1434
1435        .globl  icache_disable
1436icache_disable:
1437        mfspr   r0,L1CSR1
1438        lis     r3,0
1439        ori     r3,r3,L1CSR1_ICE
1440        andc    r0,r0,r3
1441        mtspr   L1CSR1,r0
1442        isync
1443        blr
1444
1445        .globl  icache_status
1446icache_status:
1447        mfspr   r3,L1CSR1
1448        andi.   r3,r3,L1CSR1_ICE
1449        blr
1450
1451        .globl  dcache_enable
1452dcache_enable:
1453        mflr    r8
1454        bl      invalidate_dcache
1455        mtlr    r8
1456        isync
1457        mfspr   r0,L1CSR0
1458        ori     r0,r0,(L1CSR0_CPE |  L1CSR0_DCE)@l
1459        oris    r0,r0,(L1CSR0_CPE |  L1CSR0_DCE)@h
1460        msync
1461        isync
1462        mtspr   L1CSR0,r0
1463        isync
1464        blr
1465
1466        .globl  dcache_disable
1467dcache_disable:
1468        mfspr   r3,L1CSR0
1469        lis     r4,0
1470        ori     r4,r4,L1CSR0_DCE
1471        andc    r3,r3,r4
1472        mtspr   L1CSR0,r3
1473        isync
1474        blr
1475
1476        .globl  dcache_status
1477dcache_status:
1478        mfspr   r3,L1CSR0
1479        andi.   r3,r3,L1CSR0_DCE
1480        blr
1481
1482/*------------------------------------------------------------------------------- */
1483/* Function:     in8 */
1484/* Description:  Input 8 bits */
1485/*------------------------------------------------------------------------------- */
1486        .globl  in8
1487in8:
1488        lbz     r3,0x0000(r3)
1489        blr
1490
1491/*------------------------------------------------------------------------------- */
1492/* Function:     out8 */
1493/* Description:  Output 8 bits */
1494/*------------------------------------------------------------------------------- */
1495        .globl  out8
1496out8:
1497        stb     r4,0x0000(r3)
1498        sync
1499        blr
1500
1501/*------------------------------------------------------------------------------- */
1502/* Function:     out16 */
1503/* Description:  Output 16 bits */
1504/*------------------------------------------------------------------------------- */
1505        .globl  out16
1506out16:
1507        sth     r4,0x0000(r3)
1508        sync
1509        blr
1510
1511/*------------------------------------------------------------------------------- */
1512/* Function:     out16r */
1513/* Description:  Byte reverse and output 16 bits */
1514/*------------------------------------------------------------------------------- */
1515        .globl  out16r
1516out16r:
1517        sthbrx  r4,r0,r3
1518        sync
1519        blr
1520
1521/*------------------------------------------------------------------------------- */
1522/* Function:     out32 */
1523/* Description:  Output 32 bits */
1524/*------------------------------------------------------------------------------- */
1525        .globl  out32
1526out32:
1527        stw     r4,0x0000(r3)
1528        sync
1529        blr
1530
1531/*------------------------------------------------------------------------------- */
1532/* Function:     out32r */
1533/* Description:  Byte reverse and output 32 bits */
1534/*------------------------------------------------------------------------------- */
1535        .globl  out32r
1536out32r:
1537        stwbrx  r4,r0,r3
1538        sync
1539        blr
1540
1541/*------------------------------------------------------------------------------- */
1542/* Function:     in16 */
1543/* Description:  Input 16 bits */
1544/*------------------------------------------------------------------------------- */
1545        .globl  in16
1546in16:
1547        lhz     r3,0x0000(r3)
1548        blr
1549
1550/*------------------------------------------------------------------------------- */
1551/* Function:     in16r */
1552/* Description:  Input 16 bits and byte reverse */
1553/*------------------------------------------------------------------------------- */
1554        .globl  in16r
1555in16r:
1556        lhbrx   r3,r0,r3
1557        blr
1558
1559/*------------------------------------------------------------------------------- */
1560/* Function:     in32 */
1561/* Description:  Input 32 bits */
1562/*------------------------------------------------------------------------------- */
1563        .globl  in32
1564in32:
 1565        lwz     r3,0x0000(r3)
1566        blr
1567
1568/*------------------------------------------------------------------------------- */
1569/* Function:     in32r */
1570/* Description:  Input 32 bits and byte reverse */
1571/*------------------------------------------------------------------------------- */
1572        .globl  in32r
1573in32r:
1574        lwbrx   r3,r0,r3
1575        blr
1576#endif  /* !MINIMAL_SPL */
1577
1578/*------------------------------------------------------------------------------*/
1579
1580/*
1581 * void write_tlb(mas0, mas1, mas2, mas3, mas7)
1582 */
1583        .globl  write_tlb
1584write_tlb:
1585        mtspr   MAS0,r3
1586        mtspr   MAS1,r4
1587        mtspr   MAS2,r5
1588        mtspr   MAS3,r6
1589#ifdef CONFIG_ENABLE_36BIT_PHYS
1590        mtspr   MAS7,r7
1591#endif
1592        li      r3,0
1593#ifdef CONFIG_SYS_BOOK3E_HV
1594        mtspr   MAS8,r3
1595#endif
1596        isync
1597        tlbwe
1598        msync
1599        isync
1600        blr
1601
1602/*
1603 * void relocate_code(addr_sp, gd, addr_moni)
1604 *
1605 * This "function" does not return, instead it continues in RAM
1606 * after relocating the monitor code.
1607 *
1608 * r3 = dest
1609 * r4 = src
1610 * r5 = length in bytes
1611 * r6 = cachelinesize
1612 */
        .globl  relocate_code
relocate_code:
        mr      r1,r3           /* Set new stack pointer                */
        mr      r9,r4           /* Save copy of Init Data pointer       */
        mr      r10,r5          /* Save copy of Destination Address     */

        GET_GOT
#ifndef CONFIG_SPL_SKIP_RELOCATE
        mr      r3,r5                           /* Destination Address  */
        lis     r4,CONFIG_VAL(SYS_MONITOR_BASE)@h               /* Source      Address  */
        ori     r4,r4,CONFIG_VAL(SYS_MONITOR_BASE)@l
        lwz     r5,GOT(__init_end)
        sub     r5,r5,r4
        li      r6,CONFIG_SYS_CACHELINE_SIZE            /* Cache Line Size      */

        /*
         * Fix GOT pointer:
         *
         * New GOT-PTR = (old GOT-PTR - CONFIG_VAL(SYS_MONITOR_BASE)) + Destination Address
         *
         * Offset:
         */
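        /*
         * Hypothetical worked example: with SYS_MONITOR_BASE = 0xeff40000
         * and a relocation destination of 0x2ff40000, the offset computed
         * below is r15 = 0x2ff40000 - 0xeff40000 = 0x40000000 (mod 2^32),
         * which is then added to both GOT pointers.
         */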
        sub     r15,r10,r4

        /* First our own GOT */
        add     r12,r12,r15
        /* then the one used by the C code */
        add     r30,r30,r15

        /*
         * Now relocate code
         */

        cmplw   cr1,r3,r4
        addi    r0,r5,3
        srwi.   r0,r0,2
        beq     cr1,4f          /* In place copy is not necessary       */
        beq     7f              /* Protect against 0 count              */
        mtctr   r0
        bge     cr1,2f

        la      r8,-4(r4)
        la      r7,-4(r3)
1:      lwzu    r0,4(r8)
        stwu    r0,4(r7)
        bdnz    1b
        b       4f

2:      slwi    r0,r0,2
        add     r8,r4,r0
        add     r7,r3,r0
3:      lwzu    r0,-4(r8)
        stwu    r0,-4(r7)
        bdnz    3b

/*
 * Now flush the cache: note that we must start from a cache aligned
 * address. Otherwise we might miss one cache line.
 */
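/*
 * Roughly, in C (a sketch; dcbst/sync/icbi/isync shown as intrinsics):
 *
 *	for (p = dest & ~(line - 1); p < dest + len; p += line)
 *		dcbst(p);
 *	sync();
 *	for (p = dest & ~(line - 1); p < dest + len; p += line)
 *		icbi(p);
 *	sync();
 *	isync();
 */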
4:      cmpwi   r6,0
        add     r5,r3,r5
        beq     7f              /* Always flush prefetch queue in any case */
        subi    r0,r6,1
        andc    r3,r3,r0
        mr      r4,r3
5:      dcbst   0,r4
        add     r4,r4,r6
        cmplw   r4,r5
        blt     5b
        sync                    /* Wait for all dcbst to complete on bus */
        mr      r4,r3
6:      icbi    0,r4
        add     r4,r4,r6
        cmplw   r4,r5
        blt     6b
7:      sync                    /* Wait for all icbi to complete on bus */
        isync

/*
 * We are done. Do not return; instead branch to the second part of the
 * board initialization, which now runs from RAM.
 */

        addi    r0,r10,in_ram - CONFIG_VAL(SYS_MONITOR_BASE)

        /*
         * As IVPR is going to point to a RAM address, make sure
         * IVOR15 points at a valid instruction so the debugger keeps working.
         */
        mtspr   IVOR15,r0

        /*
         * Re-point the IVPR at RAM
         */
        mtspr   IVPR,r10

        mtlr    r0
        blr                             /* NEVER RETURNS! */
#endif
        .globl  in_ram
in_ram:

        /*
         * Relocation function: r12 points to got2+0x8000
         *
         * Adjust the got2 pointers. There is no need to check for a zero
         * entry count, since this code itself puts a few entries in the table.
         */
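        /*
         * An equivalent C sketch (illustrative only):
         *
         *	offset = new address of _GOT2_TABLE_ - old value stored in the GOT;
         *	for (each 32-bit entry p in the got2 table)
         *		if (*p != 0)
         *			*p += offset;
         */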
        li      r0,__got2_entries@sectoff@l
        la      r3,GOT(_GOT2_TABLE_)
        lwz     r11,GOT(_GOT2_TABLE_)
        mtctr   r0
        sub     r11,r3,r11
        addi    r3,r3,-4
1:      lwzu    r0,4(r3)
        cmpwi   r0,0
        beq-    2f
        add     r0,r0,r11
        stw     r0,0(r3)
2:      bdnz    1b

        /*
         * Now adjust the fixups and the pointers to the fixups
         * in case we need to move ourselves again.
         */
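        /*
         * Roughly, in C (sketch only): each fixup entry names a word that
         * itself holds a pre-relocation pointer, so both get the offset:
         *
         *	for (each entry e in the fixup table) {
         *		e += offset;		(stored back into the table)
         *		if (*e != 0)
         *			*e += offset;
         *	}
         */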
        li      r0,__fixup_entries@sectoff@l
        lwz     r3,GOT(_FIXUP_TABLE_)
        cmpwi   r0,0
        mtctr   r0
        addi    r3,r3,-4
        beq     4f
3:      lwzu    r4,4(r3)
        lwzux   r0,r4,r11
        cmpwi   r0,0
        add     r0,r0,r11
        stw     r4,0(r3)
        beq-    5f
        stw     r0,0(r4)
5:      bdnz    3b
4:
clear_bss:
        /*
         * Now clear BSS segment
         */
        lwz     r3,GOT(__bss_start)
        lwz     r4,GOT(__bss_end)

        cmplw   0,r3,r4
        beq     6f

        li      r0,0
5:
        stw     r0,0(r3)
        addi    r3,r3,4
        cmplw   0,r3,r4
        blt     5b
6:

        mr      r3,r9           /* Init Data pointer            */
        mr      r4,r10          /* Destination Address          */
        bl      board_init_r

#ifndef MINIMAL_SPL
        /*
         * Set up the exception vectors after relocation: point IVPR at the
         * relocated vector base and load each IVOR with the GOT address of
         * its handler.
         *
         * r3: relocated vector base address (written to IVPR)
         */
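        /*
         * Hedged usage sketch from the C side (the exact call site is not
         * shown here; the generic init sequence is expected to invoke it
         * with the relocation address):
         *
         *	trap_init(gd->relocaddr);
         */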
        .globl  trap_init
trap_init:
        mflr    r11
        bl      _GLOBAL_OFFSET_TABLE_-4
        mflr    r12

        /* Update IVORs as per relocation */
        mtspr   IVPR,r3

        lwz     r4,CriticalInput@got(r12)
        mtspr   IVOR0,r4        /* 0: Critical input */
        lwz     r4,MachineCheck@got(r12)
        mtspr   IVOR1,r4        /* 1: Machine check */
        lwz     r4,DataStorage@got(r12)
        mtspr   IVOR2,r4        /* 2: Data storage */
        lwz     r4,InstStorage@got(r12)
        mtspr   IVOR3,r4        /* 3: Instruction storage */
        lwz     r4,ExtInterrupt@got(r12)
        mtspr   IVOR4,r4        /* 4: External interrupt */
        lwz     r4,Alignment@got(r12)
        mtspr   IVOR5,r4        /* 5: Alignment */
        lwz     r4,ProgramCheck@got(r12)
        mtspr   IVOR6,r4        /* 6: Program check */
        lwz     r4,FPUnavailable@got(r12)
        mtspr   IVOR7,r4        /* 7: Floating point unavailable */
        lwz     r4,SystemCall@got(r12)
        mtspr   IVOR8,r4        /* 8: System call */
        /* 9: Auxiliary processor unavailable (unsupported) */
        lwz     r4,Decrementer@got(r12)
        mtspr   IVOR10,r4       /* 10: Decrementer */
        lwz     r4,IntervalTimer@got(r12)
        mtspr   IVOR11,r4       /* 11: Interval timer */
        lwz     r4,WatchdogTimer@got(r12)
        mtspr   IVOR12,r4       /* 12: Watchdog timer */
        lwz     r4,DataTLBError@got(r12)
        mtspr   IVOR13,r4       /* 13: Data TLB error */
        lwz     r4,InstructionTLBError@got(r12)
        mtspr   IVOR14,r4       /* 14: Instruction TLB error */
        lwz     r4,DebugBreakpoint@got(r12)
        mtspr   IVOR15,r4       /* 15: Debug */

        mtlr    r11
        blr

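/*
 * unlock_ram_in_cache: tear down the early "RAM in cache" area used before
 * real memory was available. Each cache line covering CONFIG_SYS_INIT_RAM_ADDR
 * is invalidated and unlocked, then the TLB entries that mapped the area are
 * invalidated.
 */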
.globl unlock_ram_in_cache
unlock_ram_in_cache:
        /* invalidate the INIT_RAM section */
        lis     r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@h
        ori     r3,r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@l
        mfspr   r4,L1CFG0
        andi.   r4,r4,0x1ff
        slwi    r4,r4,(10 - 1 - L1_CACHE_SHIFT)
        mtctr   r4
1:      dcbi    r0,r3
#ifdef CONFIG_E6500     /* lock/unlock L2 cache along with L1 */
        dcblc   2, r0, r3
        dcblc   0, r0, r3
#else
        dcblc   r0,r3
#endif
        addi    r3,r3,CONFIG_SYS_CACHELINE_SIZE
        bdnz    1b
        sync

        /* Invalidate the TLB entries for the cache */
        lis     r3,CONFIG_SYS_INIT_RAM_ADDR@h
        ori     r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
        tlbivax 0,r3
        addi    r3,r3,0x1000
        tlbivax 0,r3
        addi    r3,r3,0x1000
        tlbivax 0,r3
        addi    r3,r3,0x1000
        tlbivax 0,r3
        isync
        blr

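/*
 * flush_dcache: flush the entire L1 data cache. The cache geometry is read
 * from L1CFG0; with HID0[DCFA] set, a region large enough to displace every
 * set and way is first loaded and then flushed line by line with dcbf.
 */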
.globl flush_dcache
flush_dcache:
        mfspr   r3,SPRN_L1CFG0

        rlwinm  r5,r3,9,3       /* Extract cache block size */
        twlgti  r5,1            /* Only 32 and 64 byte cache blocks
                                 * are currently defined.
                                 */
        li      r4,32
        subfic  r6,r5,2         /* r6 = log2(1KiB / cache block size) -
                                 *      log2(number of ways)
                                 */
        slw     r5,r4,r5        /* r5 = cache block size */

        rlwinm  r7,r3,0,0xff    /* Extract number of KiB in the cache */
        mulli   r7,r7,13        /* An 8-way cache will require 13
                                 * loads per set.
                                 */
        slw     r7,r7,r6

        /* save off HID0 and set DCFA */
        mfspr   r8,SPRN_HID0
        ori     r9,r8,HID0_DCFA@l
        mtspr   SPRN_HID0,r9
        isync

        lis     r4,0
        mtctr   r7

1:      lwz     r3,0(r4)        /* Load... */
        add     r4,r4,r5
        bdnz    1b

        msync
        lis     r4,0
        mtctr   r7

1:      dcbf    0,r4            /* ...and flush. */
        add     r4,r4,r5
        bdnz    1b

        /* restore HID0 */
        mtspr   SPRN_HID0,r8
        isync

        blr
#endif /* !MINIMAL_SPL */
