uboot/arch/powerpc/cpu/mpc85xx/start.S
   1/*
   2 * Copyright 2004, 2007-2011 Freescale Semiconductor, Inc.
   3 * Copyright (C) 2003  Motorola,Inc.
   4 *
   5 * See file CREDITS for list of people who contributed to this
   6 * project.
   7 *
   8 * This program is free software; you can redistribute it and/or
   9 * modify it under the terms of the GNU General Public License as
  10 * published by the Free Software Foundation; either version 2 of
  11 * the License, or (at your option) any later version.
  12 *
  13 * This program is distributed in the hope that it will be useful,
  14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  16 * GNU General Public License for more details.
  17 *
  18 * You should have received a copy of the GNU General Public License
  19 * along with this program; if not, write to the Free Software
  20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
  21 * MA 02111-1307 USA
  22 */
  23
  24/* U-Boot Startup Code for Motorola 85xx PowerPC based Embedded Boards
  25 *
  26 * The processor starts at 0xfffffffc and the code is first executed in the
  27 * last 4K page (0xfffff000-0xffffffff) in flash/rom.
  28 *
  29 */
  30
  31#include <asm-offsets.h>
  32#include <config.h>
  33#include <mpc85xx.h>
  34#include <version.h>
  35
  36#define _LINUX_CONFIG_H 1       /* avoid reading Linux autoconf.h file  */
  37
  38#include <ppc_asm.tmpl>
  39#include <ppc_defs.h>
  40
  41#include <asm/cache.h>
  42#include <asm/mmu.h>
  43
  44#undef  MSR_KERNEL
  45#define MSR_KERNEL ( MSR_ME )   /* Machine Check */
  46
  47/*
  48 * Set up GOT: Global Offset Table
  49 *
  50 * Use r12 to access the GOT
  51 */
  52        START_GOT
  53        GOT_ENTRY(_GOT2_TABLE_)
  54        GOT_ENTRY(_FIXUP_TABLE_)
  55
  56#ifndef CONFIG_NAND_SPL
  57        GOT_ENTRY(_start)
  58        GOT_ENTRY(_start_of_vectors)
  59        GOT_ENTRY(_end_of_vectors)
  60        GOT_ENTRY(transfer_to_handler)
  61#endif
  62
  63        GOT_ENTRY(__init_end)
  64        GOT_ENTRY(__bss_end__)
  65        GOT_ENTRY(__bss_start)
  66        END_GOT
  67
  68/*
  69 * e500 Startup -- after reset only the last 4KB of the effective
  70 * address space is mapped in the MMU L2 TLB1 Entry0. The .bootpg
  71 * section is located at THIS LAST page and basically does three
  72 * things: clear some registers, set up exception tables and
  73 * add more TLB entries for 'larger spaces' (e.g. the boot rom) to
  74 * continue the boot procedure.
  75 *
  76 * Once the boot rom is mapped by TLB entries we can proceed
  77 * with normal startup.
  78 *
  79 */
  80
  81        .section .bootpg,"ax"
  82        .globl _start_e500
  83
  84_start_e500:
  85
  86#if defined(CONFIG_SECURE_BOOT) && defined(CONFIG_E500MC)
  87        /* ISBC uses L2 as stack.
  88         * Disable the L2 cache here so that U-Boot can enable it later
  89         * as part of its normal flow.
  90         */
  91
  92        /* Check if L2 is enabled */
  93        mfspr   r3, SPRN_L2CSR0
  94        lis     r2, L2CSR0_L2E@h
  95        ori     r2, r2, L2CSR0_L2E@l
  96        and.    r4, r3, r2
  97        beq     l2_disabled
  98
  99        mfspr r3, SPRN_L2CSR0
 100        /* Flush L2 cache */
 101        lis     r2,(L2CSR0_L2FL)@h
 102        ori     r2, r2, (L2CSR0_L2FL)@l
 103        or      r3, r2, r3
 104        sync
 105        isync
 106        mtspr   SPRN_L2CSR0,r3
 107        isync
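            /* Wait for the flush to finish: hardware clears L2CSR0[L2FL] when done */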
 1081:
 109        mfspr r3, SPRN_L2CSR0
 110        and. r1, r3, r2
 111        bne 1b
 112
 113        mfspr r3, SPRN_L2CSR0
 114        lis r2, L2CSR0_L2E@h
 115        ori r2, r2, L2CSR0_L2E@l
 116        andc r4, r3, r2
 117        sync
 118        isync
 119        mtspr SPRN_L2CSR0,r4
 120        isync
 121
 122l2_disabled:
 123#endif
 124
 125/* clear registers/arrays not reset by hardware */
 126
 127        /* L1 */
 128        li      r0,2
 129        mtspr   L1CSR0,r0       /* invalidate d-cache */
 130        mtspr   L1CSR1,r0       /* invalidate i-cache */
 131
 132        mfspr   r1,DBSR
 133        mtspr   DBSR,r1         /* Clear all valid bits */
 134
 135        /*
 136         *      Enable L1 Caches early
 137         *
 138         */
 139
 140#if defined(CONFIG_E500MC) && defined(CONFIG_SYS_CACHE_STASHING)
 141        /* set stash id to (coreID) * 2 + 32 + L1 CT (0) */
 142        li      r2,(32 + 0)
 143        mtspr   L1CSR2,r2
 144#endif
 145
 146        /* Enable/invalidate the I-Cache */
 147        lis     r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
 148        ori     r2,r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
 149        mtspr   SPRN_L1CSR1,r2
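            /* The flash-invalidate and lock-clear bits self-clear; poll until both are zero */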
 1501:
 151        mfspr   r3,SPRN_L1CSR1
 152        and.    r1,r3,r2
 153        bne     1b
 154
 155        lis     r3,(L1CSR1_CPE|L1CSR1_ICE)@h
 156        ori     r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
 157        mtspr   SPRN_L1CSR1,r3
 158        isync
 1592:
 160        mfspr   r3,SPRN_L1CSR1
 161        andi.   r1,r3,L1CSR1_ICE@l
 162        beq     2b
 163
 164        /* Enable/invalidate the D-Cache */
 165        lis     r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@h
 166        ori     r2,r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@l
 167        mtspr   SPRN_L1CSR0,r2
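            /* Likewise, wait for the D-cache flash-invalidate and lock-clear bits to self-clear */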
 1681:
 169        mfspr   r3,SPRN_L1CSR0
 170        and.    r1,r3,r2
 171        bne     1b
 172
 173        lis     r3,(L1CSR0_CPE|L1CSR0_DCE)@h
 174        ori     r3,r3,(L1CSR0_CPE|L1CSR0_DCE)@l
 175        mtspr   SPRN_L1CSR0,r3
 176        isync
 1772:
 178        mfspr   r3,SPRN_L1CSR0
 179        andi.   r1,r3,L1CSR0_DCE@l
 180        beq     2b
 181
 182        /* Setup interrupt vectors */
 183        lis     r1,CONFIG_SYS_MONITOR_BASE@h
 184        mtspr   IVPR,r1
 185
 186        li      r1,0x0100
 187        mtspr   IVOR0,r1        /* 0: Critical input */
 188        li      r1,0x0200
 189        mtspr   IVOR1,r1        /* 1: Machine check */
 190        li      r1,0x0300
 191        mtspr   IVOR2,r1        /* 2: Data storage */
 192        li      r1,0x0400
 193        mtspr   IVOR3,r1        /* 3: Instruction storage */
 194        li      r1,0x0500
 195        mtspr   IVOR4,r1        /* 4: External interrupt */
 196        li      r1,0x0600
 197        mtspr   IVOR5,r1        /* 5: Alignment */
 198        li      r1,0x0700
 199        mtspr   IVOR6,r1        /* 6: Program check */
 200        li      r1,0x0800
 201        mtspr   IVOR7,r1        /* 7: floating point unavailable */
 202        li      r1,0x0900
 203        mtspr   IVOR8,r1        /* 8: System call */
 204        /* 9: Auxiliary processor unavailable (unsupported) */
 205        li      r1,0x0a00
 206        mtspr   IVOR10,r1       /* 10: Decrementer */
 207        li      r1,0x0b00
 208        mtspr   IVOR11,r1       /* 11: Interval timer */
 209        li      r1,0x0c00
 210        mtspr   IVOR12,r1       /* 12: Watchdog timer */
 211        li      r1,0x0d00
 212        mtspr   IVOR13,r1       /* 13: Data TLB error */
 213        li      r1,0x0e00
 214        mtspr   IVOR14,r1       /* 14: Instruction TLB error */
 215        li      r1,0x0f00
 216        mtspr   IVOR15,r1       /* 15: Debug */
 217
 218        /* Clear and set up some registers. */
 219        li      r0,0x0000
 220        lis     r1,0xffff
 221        mtspr   DEC,r0                  /* prevent dec exceptions */
 222        mttbl   r0                      /* prevent fit & wdt exceptions */
 223        mttbu   r0
 224        mtspr   TSR,r1                  /* clear all timer exception status */
 225        mtspr   TCR,r0                  /* disable all */
 226        mtspr   ESR,r0                  /* clear exception syndrome register */
 227        mtspr   MCSR,r0                 /* machine check syndrome register */
 228        mtxer   r0                      /* clear integer exception register */
 229
 230#ifdef CONFIG_SYS_BOOK3E_HV
 231        mtspr   MAS8,r0                 /* make sure MAS8 is clear */
 232#endif
 233
 234        /* Enable Time Base and Select Time Base Clock */
 235        lis     r0,HID0_EMCP@h          /* Enable machine check */
 236#if defined(CONFIG_ENABLE_36BIT_PHYS)
 237        ori     r0,r0,HID0_ENMAS7@l     /* Enable MAS7 */
 238#endif
 239#ifndef CONFIG_E500MC
 240        ori     r0,r0,HID0_TBEN@l       /* Enable Timebase */
 241#endif
 242        mtspr   HID0,r0
 243
 244#ifndef CONFIG_E500MC
 245        li      r0,(HID1_ASTME|HID1_ABE)@l      /* Addr streaming & broadcast */
 246        mfspr   r3,PVR
 247        andi.   r3,r3, 0xff
 248        cmpwi   r3,0x50@l       /* if we are rev 5.0 or greater set MBDD */
 249        blt 1f
 250        /* Set MBDD bit also */
 251        ori r0, r0, HID1_MBDD@l
 2521:
 253        mtspr   HID1,r0
 254#endif
 255
 256#ifdef CONFIG_SYS_FSL_ERRATUM_CPU_A003999
 257        mfspr   r3,977
 258        oris    r3,r3,0x0100
 259        mtspr   977,r3
 260#endif
 261
 262        /* Enable Branch Prediction */
 263#if defined(CONFIG_BTB)
 264        lis     r0,BUCSR_ENABLE@h
 265        ori     r0,r0,BUCSR_ENABLE@l
 266        mtspr   SPRN_BUCSR,r0
 267#endif
 268
 269#if defined(CONFIG_SYS_INIT_DBCR)
 270        lis     r1,0xffff
 271        ori     r1,r1,0xffff
 272        mtspr   DBSR,r1                 /* Clear all status bits */
 273        lis     r0,CONFIG_SYS_INIT_DBCR@h       /* DBCR0[IDM] must be set */
 274        ori     r0,r0,CONFIG_SYS_INIT_DBCR@l
 275        mtspr   DBCR0,r0
 276#endif
 277
 278#ifdef CONFIG_MPC8569
 279#define CONFIG_SYS_LBC_ADDR (CONFIG_SYS_CCSRBAR_DEFAULT + 0x5000)
 280#define CONFIG_SYS_LBCR_ADDR (CONFIG_SYS_LBC_ADDR + 0xd0)
 281
 282        /* MPC8569 Rev.0 silicon needs bit 13 of LBCR set to allow the eLBC to
 283         * use an address space larger than 12 bits, and this must be done in
 284         * the 4K boot page, so we set the bit here.
 285         */
 286
 287        /* create a temp mapping TLB0[0] for LBCR  */
 288        lis     r6,FSL_BOOKE_MAS0(0, 0, 0)@h
 289        ori     r6,r6,FSL_BOOKE_MAS0(0, 0, 0)@l
 290
 291        lis     r7,FSL_BOOKE_MAS1(1, 0, 0, 0, BOOKE_PAGESZ_4K)@h
 292        ori     r7,r7,FSL_BOOKE_MAS1(1, 0, 0, 0, BOOKE_PAGESZ_4K)@l
 293
 294        lis     r8,FSL_BOOKE_MAS2(CONFIG_SYS_LBC_ADDR, MAS2_I|MAS2_G)@h
 295        ori     r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_LBC_ADDR, MAS2_I|MAS2_G)@l
 296
 297        lis     r9,FSL_BOOKE_MAS3(CONFIG_SYS_LBC_ADDR, 0,
 298                                                (MAS3_SX|MAS3_SW|MAS3_SR))@h
 299        ori     r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_LBC_ADDR, 0,
 300                                                (MAS3_SX|MAS3_SW|MAS3_SR))@l
 301
 302        mtspr   MAS0,r6
 303        mtspr   MAS1,r7
 304        mtspr   MAS2,r8
 305        mtspr   MAS3,r9
 306        isync
 307        msync
 308        tlbwe
 309
 310        /* Set LBCR register */
 311        lis     r4,CONFIG_SYS_LBCR_ADDR@h
 312        ori     r4,r4,CONFIG_SYS_LBCR_ADDR@l
 313
 314        lis     r5,CONFIG_SYS_LBC_LBCR@h
 315        ori     r5,r5,CONFIG_SYS_LBC_LBCR@l
 316        stw     r5,0(r4)
 317        isync
 318
 319        /* invalidate this temp TLB */
 320        lis     r4,CONFIG_SYS_LBC_ADDR@h
 321        ori     r4,r4,CONFIG_SYS_LBC_ADDR@l
 322        tlbivax 0,r4
 323        isync
 324
 325#endif /* CONFIG_MPC8569 */
 326
 327/*
 328 * Search for the TLB that covers the code we're executing, and shrink it
 329 * so that it covers only this 4K page.  That will ensure that any other
 330 * TLB we create won't interfere with it.  We assume that the TLB exists,
 331 * which is why we don't check the Valid bit of MAS1.
 332 *
 333 * This is necessary, for example, when booting from the on-chip ROM,
 334 * which (oddly) creates a single 4GB TLB that covers CCSR and DDR.
 335 * If we don't shrink this TLB now, then we'll accidentally delete it
 336 * in "purge_old_ccsr_tlb" below.
 337 */
 338        bl      nexti           /* Find our address */
 339nexti:  mflr    r1              /* R1 = our PC */
 340        li      r2, 0
 341        mtspr   MAS6, r2        /* Assume the current PID and AS are 0 */
 342        isync
 343        msync
 344        tlbsx   0, r1           /* This must succeed */
 345
 346        /* Set the size of the TLB to 4KB */
 347        mfspr   r3, MAS1
 348        li      r2, 0xF00
 349        andc    r3, r3, r2      /* Clear the TSIZE bits */
 350        ori     r3, r3, MAS1_TSIZE(BOOKE_PAGESZ_4K)@l
 351        mtspr   MAS1, r3
 352
 353        /*
 354         * Set the base address of the TLB to our PC.  We assume that
 355         * virtual == physical.  We also assume that MAS2_EPN == MAS3_RPN.
 356         */
 357        lis     r3, MAS2_EPN@h
 358        ori     r3, r3, MAS2_EPN@l      /* R3 = MAS2_EPN */
 359
 360        and     r1, r1, r3      /* Our PC, rounded down to the nearest page */
 361
 362        mfspr   r2, MAS2
 363        andc    r2, r2, r3
 364        or      r2, r2, r1
 365        mtspr   MAS2, r2        /* Set the EPN to our PC base address */
 366
 367        mfspr   r2, MAS3
 368        andc    r2, r2, r3
 369        or      r2, r2, r1
 370        mtspr   MAS3, r2        /* Set the RPN to our PC base address */
 371
 372        isync
 373        msync
 374        tlbwe
 375
 376/*
 377 * Relocate CCSR, if necessary.  We relocate CCSR if (obviously) the default
 378 * location is not where we want it.  This typically happens on a 36-bit
 379 * system, where we want to move CCSR to near the top of 36-bit address space.
 380 *
 381 * To move CCSR, we create two temporary TLBs, one for the old location, and
 382 * another for the new location.  On CoreNet systems, we also need to create
 383 * a special, temporary LAW.
 384 *
 385 * As a general rule, TLB0 is used for short-term TLBs, and TLB1 is used for
 386 * long-term TLBs, so we use TLB0 here.
 387 */
 388#if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS)
 389
 390#if !defined(CONFIG_SYS_CCSRBAR_PHYS_HIGH) || !defined(CONFIG_SYS_CCSRBAR_PHYS_LOW)
 391#error "CONFIG_SYS_CCSRBAR_PHYS_HIGH and CONFIG_SYS_CCSRBAR_PHYS_LOW must be defined."
 392#endif
 393
 394purge_old_ccsr_tlb:
 395        lis     r8, CONFIG_SYS_CCSRBAR@h
 396        ori     r8, r8, CONFIG_SYS_CCSRBAR@l
 397        lis     r9, (CONFIG_SYS_CCSRBAR + 0x1000)@h
 398        ori     r9, r9, (CONFIG_SYS_CCSRBAR + 0x1000)@l
 399
 400        /*
 401         * In a multi-stage boot (e.g. NAND boot), a previous stage may have
 402         * created a TLB for CCSR, which will interfere with our relocation
 403         * code.  Since we're going to create a new TLB for CCSR anyway,
 404         * it should be safe to delete this old TLB here.  We have to search
 405         * for it, though.
 406         */
 407
 408        li      r1, 0
 409        mtspr   MAS6, r1        /* Search the current address space and PID */
 410        isync
 411        msync
 412        tlbsx   0, r8
 413        mfspr   r1, MAS1
 414        andis.  r2, r1, MAS1_VALID@h    /* Check for the Valid bit */
 415        beq     1f                      /* Skip if no TLB found */
 416
 417        rlwinm  r1, r1, 0, 1, 31        /* Clear Valid bit */
 418        mtspr   MAS1, r1
 419        isync
 420        msync
 421        tlbwe
 4221:
 423
 424create_ccsr_new_tlb:
 425        /*
 426         * Create a TLB for the new location of CCSR.  Register R8 is reserved
 427         * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR).
 428         */
 429        lis     r0, FSL_BOOKE_MAS0(0, 0, 0)@h
 430        ori     r0, r0, FSL_BOOKE_MAS0(0, 0, 0)@l
 431        lis     r1, FSL_BOOKE_MAS1(1, 0, 0, 0, BOOKE_PAGESZ_4K)@h
 432        ori     r1, r1, FSL_BOOKE_MAS1(1, 0, 0, 0, BOOKE_PAGESZ_4K)@l
 433        lis     r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR, (MAS2_I|MAS2_G))@h
 434        ori     r2, r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR, (MAS2_I|MAS2_G))@l
 435        lis     r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@h
 436        ori     r3, r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@l
 437        lis     r7, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
 438        ori     r7, r7, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
 439        mtspr   MAS0, r0
 440        mtspr   MAS1, r1
 441        mtspr   MAS2, r2
 442        mtspr   MAS3, r3
 443        mtspr   MAS7, r7
 444        isync
 445        msync
 446        tlbwe
 447
 448        /*
 449         * Create a TLB for the current location of CCSR.  Register R9 is reserved
 450         * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR + 0x1000).
 451         */
 452create_ccsr_old_tlb:
 453        lis     r0, FSL_BOOKE_MAS0(0, 1, 0)@h
 454        ori     r0, r0, FSL_BOOKE_MAS0(0, 1, 0)@l
 455        lis     r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR + 0x1000, (MAS2_I|MAS2_G))@h
 456        ori     r2, r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR + 0x1000, (MAS2_I|MAS2_G))@l
 457        lis     r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_DEFAULT, 0, (MAS3_SW|MAS3_SR))@h
 458        ori     r3, r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_DEFAULT, 0, (MAS3_SW|MAS3_SR))@l
 459        li      r7, 0   /* The default CCSR address is always a 32-bit number */
 460        mtspr   MAS0, r0
 461        /* MAS1 is the same as above */
 462        mtspr   MAS2, r2
 463        mtspr   MAS3, r3
 464        mtspr   MAS7, r7
 465        isync
 466        msync
 467        tlbwe
 468
 469        /*
 470         * We have a TLB for what we think is the current (old) CCSR.  Let's
 471         * verify that, otherwise we won't be able to move it.
 472         * CONFIG_SYS_CCSRBAR_DEFAULT is always a 32-bit number, so we only
 473         * need to compare the lower 32 bits of CCSRBAR on CoreNet systems.
 474         */
 475verify_old_ccsr:
 476        lis     r0, CONFIG_SYS_CCSRBAR_DEFAULT@h
 477        ori     r0, r0, CONFIG_SYS_CCSRBAR_DEFAULT@l
 478#ifdef CONFIG_FSL_CORENET
 479        lwz     r1, 4(r9)               /* CCSRBARL */
 480#else
 481        lwz     r1, 0(r9)               /* CCSRBAR, shifted right by 12 */
 482        slwi    r1, r1, 12
 483#endif
 484
 485        cmpl    0, r0, r1
 486
 487        /*
 488         * If the value we read from CCSRBARL is not what we expect, then
 489         * enter an infinite loop.  This will at least allow a debugger to
 490         * halt execution and examine TLBs, etc.  There's no point in going
 491         * on.
 492         */
 493infinite_debug_loop:
 494        bne     infinite_debug_loop
 495
 496#ifdef CONFIG_FSL_CORENET
 497
 498#define CCSR_LAWBARH0   (CONFIG_SYS_CCSRBAR + 0x1000)
 499#define LAW_EN          0x80000000
 500#define LAW_SIZE_4K     0xb
 501#define CCSRBAR_LAWAR   (LAW_EN | (0x1e << 20) | LAW_SIZE_4K)
 502#define CCSRAR_C        0x80000000      /* Commit */
 503
 504create_temp_law:
 505        /*
 506         * On CoreNet systems, we create the temporary LAW using a special LAW
 507         * target ID of 0x1e.  LAWBARH is at offset 0xc00 in CCSR.
 508         */
 509        lis     r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
 510        ori     r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
 511        lis     r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
 512        ori     r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
 513        lis     r2, CCSRBAR_LAWAR@h
 514        ori     r2, r2, CCSRBAR_LAWAR@l
 515
 516        stw     r0, 0xc00(r9)   /* LAWBARH0 */
 517        stw     r1, 0xc04(r9)   /* LAWBARL0 */
 518        sync
 519        stw     r2, 0xc08(r9)   /* LAWAR0 */
 520
 521        /*
 522         * Read back from LAWAR to ensure the update is complete.  e500mc
 523         * cores also require an isync.
 524         */
 525        lwz     r0, 0xc08(r9)   /* LAWAR0 */
 526        isync
 527
 528        /*
 529         * Read the current CCSRBARH and CCSRBARL using load word instructions.
 530         * Follow this with an isync instruction. This forces any outstanding
 531         * accesses to configuration space to completion.
 532         */
 533read_old_ccsrbar:
 534        lwz     r0, 0(r9)       /* CCSRBARH */
 535        lwz     r0, 4(r9)       /* CCSRBARL */
 536        isync
 537
 538        /*
 539         * Write the new values for CCSRBARH and CCSRBARL to their old
 540         * locations.  CCSRBARH has a shadow register: a write to CCSRBARH only
 541         * loads the shadow, and when CCSRBARL is subsequently written, the
 542         * shadow contents and the CCSRBARL value are loaded into the CCSRBARH
 543         * and CCSRBARL registers, respectively.  Follow this with a sync
 544         * instruction.
 545         *
 546         */
 547write_new_ccsrbar:
 548        lis     r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
 549        ori     r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
 550        lis     r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
 551        ori     r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
 552        lis     r2, CCSRAR_C@h
 553        ori     r2, r2, CCSRAR_C@l
 554
 555        stw     r0, 0(r9)       /* Write to CCSRBARH */
 556        sync                    /* Make sure we write to CCSRBARH first */
 557        stw     r1, 4(r9)       /* Write to CCSRBARL */
 558        sync
 559
 560        /*
 561         * Write a 1 to the commit bit (C) of CCSRAR at the old location.
 562         * Follow this with a sync instruction.
 563         */
 564        stw     r2, 8(r9)
 565        sync
 566
 567        /* Delete the temporary LAW */
 568delete_temp_law:
 569        li      r1, 0
 570        stw     r1, 0xc08(r8)
 571        sync
 572        stw     r1, 0xc00(r8)
 573        stw     r1, 0xc04(r8)
 574        sync
 575
 576#else /* #ifdef CONFIG_FSL_CORENET */
 577
 578write_new_ccsrbar:
 579        /*
 580         * Read the current value of CCSRBAR using a load word instruction
 581         * followed by an isync. This forces all accesses to configuration
 582         * space to complete.
 583         */
 584        sync
 585        lwz     r0, 0(r9)
 586        isync
 587
 588/* CONFIG_SYS_CCSRBAR_PHYS right shifted by 12 */
 589#define CCSRBAR_PHYS_RS12 ((CONFIG_SYS_CCSRBAR_PHYS_HIGH << 20) | \
 590                           (CONFIG_SYS_CCSRBAR_PHYS_LOW >> 12))
 591
 592        /* Write the new value to CCSRBAR. */
 593        lis     r0, CCSRBAR_PHYS_RS12@h
 594        ori     r0, r0, CCSRBAR_PHYS_RS12@l
 595        stw     r0, 0(r9)
 596        sync
 597
 598        /*
 599         * The manual says to perform a load of an address that does not
 600         * access configuration space or the on-chip SRAM using an existing TLB,
 601         * but that doesn't appear to be necessary.  We will do the isync,
 602         * though.
 603         */
 604        isync
 605
 606        /*
 607         * Read the contents of CCSRBAR from its new location, followed by
 608         * another isync.
 609         */
 610        lwz     r0, 0(r8)
 611        isync
 612
 613#endif  /* #ifdef CONFIG_FSL_CORENET */
 614
 615        /* Delete the temporary TLBs */
 616delete_temp_tlbs:
 617        lis     r0, FSL_BOOKE_MAS0(0, 0, 0)@h
 618        ori     r0, r0, FSL_BOOKE_MAS0(0, 0, 0)@l
 619        li      r1, 0
 620        lis     r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR, (MAS2_I|MAS2_G))@h
 621        ori     r2, r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR, (MAS2_I|MAS2_G))@l
 622        mtspr   MAS0, r0
 623        mtspr   MAS1, r1
 624        mtspr   MAS2, r2
 625        isync
 626        msync
 627        tlbwe
 628
 629        lis     r0, FSL_BOOKE_MAS0(0, 1, 0)@h
 630        ori     r0, r0, FSL_BOOKE_MAS0(0, 1, 0)@l
 631        lis     r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR + 0x1000, (MAS2_I|MAS2_G))@h
 632        ori     r2, r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR + 0x1000, (MAS2_I|MAS2_G))@l
 633        mtspr   MAS0, r0
 634        mtspr   MAS2, r2
 635        isync
 636        msync
 637        tlbwe
 638#endif /* #if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS) */
 639
 640create_init_ram_area:
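            /* TLB1 entry 15 holds the temporary AS=1 mapping of the boot image
             * (set up by one of the variants below) */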
 641        lis     r6,FSL_BOOKE_MAS0(1, 15, 0)@h
 642        ori     r6,r6,FSL_BOOKE_MAS0(1, 15, 0)@l
 643
 644#if !defined(CONFIG_SYS_RAMBOOT) && !defined(CONFIG_SECURE_BOOT)
 645        /* create a temp mapping in AS=1 to the 4M boot window */
 646        lis     r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_4M)@h
 647        ori     r7,r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_4M)@l
 648
 649        lis     r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE & 0xffc00000, (MAS2_I|MAS2_G))@h
 650        ori     r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE & 0xffc00000, (MAS2_I|MAS2_G))@l
 651
 652        /* The 85xx has the default boot window 0xff800000 - 0xffffffff */
 653        lis     r9,FSL_BOOKE_MAS3(0xffc00000, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@h
 654        ori     r9,r9,FSL_BOOKE_MAS3(0xffc00000, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@l
 655#elif !defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_SECURE_BOOT)
 656        /* create a temp mapping in AS=1 for the flash mapping
 657         * created by the PBL for the ISBC code
 658         */
 659        lis     r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_1M)@h
 660        ori     r7,r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_1M)@l
 661
 662        lis     r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE, (MAS2_I|MAS2_G))@h
 663        ori     r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE, (MAS2_I|MAS2_G))@l
 664
 665        lis     r9,FSL_BOOKE_MAS3(CONFIG_SYS_PBI_FLASH_WINDOW, 0,
 666                                                (MAS3_SX|MAS3_SW|MAS3_SR))@h
 667        ori     r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_PBI_FLASH_WINDOW, 0,
 668                                                (MAS3_SX|MAS3_SW|MAS3_SR))@l
 669#else
 670        /*
 671         * create a temp mapping in AS=1 to the 1M CONFIG_SYS_MONITOR_BASE space;
 672         * the main image has been relocated to CONFIG_SYS_MONITOR_BASE in the second stage.
 673         */
 674        lis     r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_1M)@h
 675        ori     r7,r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_1M)@l
 676
 677        lis     r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE, (MAS2_I|MAS2_G))@h
 678        ori     r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE, (MAS2_I|MAS2_G))@l
 679
 680        lis     r9,FSL_BOOKE_MAS3(CONFIG_SYS_MONITOR_BASE, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@h
 681        ori     r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_MONITOR_BASE, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@l
 682#endif
 683
 684        mtspr   MAS0,r6
 685        mtspr   MAS1,r7
 686        mtspr   MAS2,r8
 687        mtspr   MAS3,r9
 688        isync
 689        msync
 690        tlbwe
 691
 692        /* create a temp mapping in AS=1 to the stack */
 693        lis     r6,FSL_BOOKE_MAS0(1, 14, 0)@h
 694        ori     r6,r6,FSL_BOOKE_MAS0(1, 14, 0)@l
 695
 696        lis     r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_16K)@h
 697        ori     r7,r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_16K)@l
 698
 699        lis     r8,FSL_BOOKE_MAS2(CONFIG_SYS_INIT_RAM_ADDR, 0)@h
 700        ori     r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_INIT_RAM_ADDR, 0)@l
 701
 702#if defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW) && \
 703    defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH)
 704        lis     r9,FSL_BOOKE_MAS3(CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW, 0,
 705                                (MAS3_SX|MAS3_SW|MAS3_SR))@h
 706        ori     r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW, 0,
 707                                (MAS3_SX|MAS3_SW|MAS3_SR))@l
 708        li      r10,CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH
 709        mtspr   MAS7,r10
 710#else
 711        lis     r9,FSL_BOOKE_MAS3(CONFIG_SYS_INIT_RAM_ADDR, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@h
 712        ori     r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_INIT_RAM_ADDR, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@l
 713#endif
 714
 715        mtspr   MAS0,r6
 716        mtspr   MAS1,r7
 717        mtspr   MAS2,r8
 718        mtspr   MAS3,r9
 719        isync
 720        msync
 721        tlbwe
 722
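            /*
             * Switch to the AS=1 mappings just created: rfi loads the MSR from
             * SRR1 (with IS|DS set) and jumps to switch_as from SRR0.
             */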
 723        lis     r6,MSR_IS|MSR_DS@h
 724        ori     r6,r6,MSR_IS|MSR_DS@l
 725        lis     r7,switch_as@h
 726        ori     r7,r7,switch_as@l
 727
 728        mtspr   SPRN_SRR0,r7
 729        mtspr   SPRN_SRR1,r6
 730        rfi
 731
 732switch_as:
 733/* L1 DCache is used for initial RAM */
 734
 735        /* Allocate Initial RAM in data cache.
 736         */
 737        lis     r3,CONFIG_SYS_INIT_RAM_ADDR@h
 738        ori     r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
 739        mfspr   r2, L1CFG0
 740        andi.   r2, r2, 0x1ff
 741        /* cache size * 1024 / (2 * L1 line size) */
 742        slwi    r2, r2, (10 - 1 - L1_CACHE_SHIFT)
 743        mtctr   r2
 744        li      r0,0
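            /* dcbz establishes each line zeroed in the D-cache and dcbtls locks it,
             * so the initial RAM area stays resident in the cache */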
 7451:
 746        dcbz    r0,r3
 747        dcbtls  0,r0,r3
 748        addi    r3,r3,CONFIG_SYS_CACHELINE_SIZE
 749        bdnz    1b
 750
 751        /* Jump out of the last 4K page and continue to the 'normal' start */
 752#ifdef CONFIG_SYS_RAMBOOT
 753        b       _start_cont
 754#else
 755        /* Calculate absolute address in FLASH and jump there           */
 756        /*--------------------------------------------------------------*/
 757        lis     r3,CONFIG_SYS_MONITOR_BASE@h
 758        ori     r3,r3,CONFIG_SYS_MONITOR_BASE@l
 759        addi    r3,r3,_start_cont - _start + _START_OFFSET
 760        mtlr    r3
 761        blr
 762#endif
 763
 764        .text
 765        .globl  _start
 766_start:
 767        .long   0x27051956              /* U-BOOT Magic Number */
 768        .globl  version_string
 769version_string:
 770        .ascii U_BOOT_VERSION_STRING, "\0"
 771
 772        .align  4
 773        .globl  _start_cont
 774_start_cont:
 775        /* Set up the stack in initial RAM, which could be L2-as-SRAM or L1 dcache */
 776        lis     r1,CONFIG_SYS_INIT_RAM_ADDR@h
 777        ori     r1,r1,CONFIG_SYS_INIT_SP_OFFSET@l
 778
 779        li      r0,0
 780        stwu    r0,-4(r1)
 781        stwu    r0,-4(r1)               /* Terminate call chain */
 782
 783        stwu    r1,-8(r1)               /* Save back chain and move SP */
 784        lis     r0,RESET_VECTOR@h       /* Address of reset vector */
 785        ori     r0,r0,RESET_VECTOR@l
 786        stwu    r1,-8(r1)               /* Save back chain and move SP */
 787        stw     r0,+12(r1)              /* Save return addr (underflow vect) */
 788
 789        GET_GOT
 790        bl      cpu_init_early_f
 791
 792        /* switch back to AS = 0 */
 793        lis     r3,(MSR_CE|MSR_ME|MSR_DE)@h
 794        ori     r3,r3,(MSR_CE|MSR_ME|MSR_DE)@l
 795        mtmsr   r3
 796        isync
 797
 798        bl      cpu_init_f
 799        bl      board_init_f
 800        isync
 801
 802        /* NOTREACHED - board_init_f() does not return */
 803
 804#ifndef CONFIG_NAND_SPL
 805        . = EXC_OFF_SYS_RESET
 806        .globl  _start_of_vectors
 807_start_of_vectors:
 808
 809/* Critical input. */
 810        CRIT_EXCEPTION(0x0100, CriticalInput, CritcalInputException)
 811
 812/* Machine check */
 813        MCK_EXCEPTION(0x200, MachineCheck, MachineCheckException)
 814
 815/* Data Storage exception. */
 816        STD_EXCEPTION(0x0300, DataStorage, UnknownException)
 817
 818/* Instruction Storage exception. */
 819        STD_EXCEPTION(0x0400, InstStorage, UnknownException)
 820
 821/* External Interrupt exception. */
 822        STD_EXCEPTION(0x0500, ExtInterrupt, ExtIntException)
 823
 824/* Alignment exception. */
 825        . = 0x0600
 826Alignment:
 827        EXCEPTION_PROLOG(SRR0, SRR1)
 828        mfspr   r4,DAR
 829        stw     r4,_DAR(r21)
 830        mfspr   r5,DSISR
 831        stw     r5,_DSISR(r21)
 832        addi    r3,r1,STACK_FRAME_OVERHEAD
 833        EXC_XFER_TEMPLATE(Alignment, AlignmentException, MSR_KERNEL, COPY_EE)
 834
 835/* Program check exception */
 836        . = 0x0700
 837ProgramCheck:
 838        EXCEPTION_PROLOG(SRR0, SRR1)
 839        addi    r3,r1,STACK_FRAME_OVERHEAD
 840        EXC_XFER_TEMPLATE(ProgramCheck, ProgramCheckException,
 841                MSR_KERNEL, COPY_EE)
 842
 843        /* No FPU on MPC85xx.  This exception is not supposed to happen.
 844         */
 845        STD_EXCEPTION(0x0800, FPUnavailable, UnknownException)
 846
 847        . = 0x0900
 848/*
 849 * r0 - SYSCALL number
 850 * r3-... arguments
 851 */
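    /*
     * The function table address and entry count below are placeholders
     * (see the trap_init note).  LR, SRR0 and SRR1 are pushed onto a small
     * stack whose pointer lives at 0xd00-4; the handler then enters the
     * requested function with rfi and returns through _back.
     */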
 852SystemCall:
 853        addis   r11,r0,0        /* get functions table addr */
 854        ori     r11,r11,0       /* Note: this code is patched in trap_init */
 855        addis   r12,r0,0        /* get number of functions */
 856        ori     r12,r12,0
 857
 858        cmplw   0,r0,r12
 859        bge     1f
 860
 861        rlwinm  r0,r0,2,0,31    /* fn_addr = fn_tbl[r0] */
 862        add     r11,r11,r0
 863        lwz     r11,0(r11)
 864
 865        li      r20,0xd00-4     /* Get stack pointer */
 866        lwz     r12,0(r20)
 867        subi    r12,r12,12      /* Adjust stack pointer */
 868        li      r0,0xc00+_end_back-SystemCall
 869        cmplw   0,r0,r12        /* Check stack overflow */
 870        bgt     1f
 871        stw     r12,0(r20)
 872
 873        mflr    r0
 874        stw     r0,0(r12)
 875        mfspr   r0,SRR0
 876        stw     r0,4(r12)
 877        mfspr   r0,SRR1
 878        stw     r0,8(r12)
 879
 880        li      r12,0xc00+_back-SystemCall
 881        mtlr    r12
 882        mtspr   SRR0,r11
 883
 8841:      SYNC
 885        rfi
 886_back:
 887
 888        mfmsr   r11                     /* Disable interrupts */
 889        li      r12,0
 890        ori     r12,r12,MSR_EE
 891        andc    r11,r11,r12
 892        SYNC                            /* Some chip revs need this... */
 893        mtmsr   r11
 894        SYNC
 895
 896        li      r12,0xd00-4             /* restore regs */
 897        lwz     r12,0(r12)
 898
 899        lwz     r11,0(r12)
 900        mtlr    r11
 901        lwz     r11,4(r12)
 902        mtspr   SRR0,r11
 903        lwz     r11,8(r12)
 904        mtspr   SRR1,r11
 905
 906        addi    r12,r12,12              /* Adjust stack pointer */
 907        li      r20,0xd00-4
 908        stw     r12,0(r20)
 909
 910        SYNC
 911        rfi
 912_end_back:
 913
 914        STD_EXCEPTION(0x0a00, Decrementer, timer_interrupt)
 915        STD_EXCEPTION(0x0b00, IntervalTimer, UnknownException)
 916        STD_EXCEPTION(0x0c00, WatchdogTimer, UnknownException)
 917
 918        STD_EXCEPTION(0x0d00, DataTLBError, UnknownException)
 919        STD_EXCEPTION(0x0e00, InstructionTLBError, UnknownException)
 920
 921        CRIT_EXCEPTION(0x0f00, DebugBreakpoint, DebugException )
 922
 923        .globl  _end_of_vectors
 924_end_of_vectors:
 925
 926
 927        . = . + (0x100 - ( . & 0xff ))  /* align for debug */
 928
 929/*
 930 * This code finishes saving the registers to the exception frame
 931 * and jumps to the appropriate handler for the exception.
 932 * Register r21 is pointer into trap frame, r1 has new stack pointer.
 933 */
 934        .globl  transfer_to_handler
 935transfer_to_handler:
 936        stw     r22,_NIP(r21)
 937        lis     r22,MSR_POW@h
 938        andc    r23,r23,r22
 939        stw     r23,_MSR(r21)
 940        SAVE_GPR(7, r21)
 941        SAVE_4GPRS(8, r21)
 942        SAVE_8GPRS(12, r21)
 943        SAVE_8GPRS(24, r21)
 944
 945        mflr    r23
 946        andi.   r24,r23,0x3f00          /* get vector offset */
 947        stw     r24,TRAP(r21)
 948        li      r22,0
 949        stw     r22,RESULT(r21)
 950        mtspr   SPRG2,r22               /* r1 is now kernel sp */
 951
 952        lwz     r24,0(r23)              /* virtual address of handler */
 953        lwz     r23,4(r23)              /* where to go when done */
 954        mtspr   SRR0,r24
 955        mtspr   SRR1,r20
 956        mtlr    r23
 957        SYNC
 958        rfi                             /* jump to handler, enable MMU */
 959
 960int_return:
 961        mfmsr   r28             /* Disable interrupts */
 962        li      r4,0
 963        ori     r4,r4,MSR_EE
 964        andc    r28,r28,r4
 965        SYNC                    /* Some chip revs need this... */
 966        mtmsr   r28
 967        SYNC
 968        lwz     r2,_CTR(r1)
 969        lwz     r0,_LINK(r1)
 970        mtctr   r2
 971        mtlr    r0
 972        lwz     r2,_XER(r1)
 973        lwz     r0,_CCR(r1)
 974        mtspr   XER,r2
 975        mtcrf   0xFF,r0
 976        REST_10GPRS(3, r1)
 977        REST_10GPRS(13, r1)
 978        REST_8GPRS(23, r1)
 979        REST_GPR(31, r1)
 980        lwz     r2,_NIP(r1)     /* Restore environment */
 981        lwz     r0,_MSR(r1)
 982        mtspr   SRR0,r2
 983        mtspr   SRR1,r0
 984        lwz     r0,GPR0(r1)
 985        lwz     r2,GPR2(r1)
 986        lwz     r1,GPR1(r1)
 987        SYNC
 988        rfi
 989
 990crit_return:
 991        mfmsr   r28             /* Disable interrupts */
 992        li      r4,0
 993        ori     r4,r4,MSR_EE
 994        andc    r28,r28,r4
 995        SYNC                    /* Some chip revs need this... */
 996        mtmsr   r28
 997        SYNC
 998        lwz     r2,_CTR(r1)
 999        lwz     r0,_LINK(r1)
1000        mtctr   r2
1001        mtlr    r0
1002        lwz     r2,_XER(r1)
1003        lwz     r0,_CCR(r1)
1004        mtspr   XER,r2
1005        mtcrf   0xFF,r0
1006        REST_10GPRS(3, r1)
1007        REST_10GPRS(13, r1)
1008        REST_8GPRS(23, r1)
1009        REST_GPR(31, r1)
1010        lwz     r2,_NIP(r1)     /* Restore environment */
1011        lwz     r0,_MSR(r1)
1012        mtspr   SPRN_CSRR0,r2
1013        mtspr   SPRN_CSRR1,r0
1014        lwz     r0,GPR0(r1)
1015        lwz     r2,GPR2(r1)
1016        lwz     r1,GPR1(r1)
1017        SYNC
1018        rfci
1019
1020mck_return:
1021        mfmsr   r28             /* Disable interrupts */
1022        li      r4,0
1023        ori     r4,r4,MSR_EE
1024        andc    r28,r28,r4
1025        SYNC                    /* Some chip revs need this... */
1026        mtmsr   r28
1027        SYNC
1028        lwz     r2,_CTR(r1)
1029        lwz     r0,_LINK(r1)
1030        mtctr   r2
1031        mtlr    r0
1032        lwz     r2,_XER(r1)
1033        lwz     r0,_CCR(r1)
1034        mtspr   XER,r2
1035        mtcrf   0xFF,r0
1036        REST_10GPRS(3, r1)
1037        REST_10GPRS(13, r1)
1038        REST_8GPRS(23, r1)
1039        REST_GPR(31, r1)
1040        lwz     r2,_NIP(r1)     /* Restore environment */
1041        lwz     r0,_MSR(r1)
1042        mtspr   SPRN_MCSRR0,r2
1043        mtspr   SPRN_MCSRR1,r0
1044        lwz     r0,GPR0(r1)
1045        lwz     r2,GPR2(r1)
1046        lwz     r1,GPR1(r1)
1047        SYNC
1048        rfmci
1049
1050/* Cache functions.
1051*/
1052.globl flush_icache
1053flush_icache:
1054.globl invalidate_icache
1055invalidate_icache:
1056        mfspr   r0,L1CSR1
1057        ori     r0,r0,L1CSR1_ICFI
1058        msync
1059        isync
1060        mtspr   L1CSR1,r0
1061        isync
1062        blr                             /* entire I cache */
1063
1064.globl invalidate_dcache
1065invalidate_dcache:
1066        mfspr   r0,L1CSR0
1067        ori     r0,r0,L1CSR0_DCFI
1068        msync
1069        isync
1070        mtspr   L1CSR0,r0
1071        isync
1072        blr
1073
1074        .globl  icache_enable
1075icache_enable:
1076        mflr    r8
1077        bl      invalidate_icache
1078        mtlr    r8
1079        isync
1080        mfspr   r4,L1CSR1
1081        ori     r4,r4,0x0001
1082        oris    r4,r4,0x0001
1083        mtspr   L1CSR1,r4
1084        isync
1085        blr
1086
1087        .globl  icache_disable
1088icache_disable:
1089        mfspr   r0,L1CSR1
1090        lis     r3,0
1091        ori     r3,r3,L1CSR1_ICE
1092        andc    r0,r0,r3
1093        mtspr   L1CSR1,r0
1094        isync
1095        blr
1096
1097        .globl  icache_status
1098icache_status:
1099        mfspr   r3,L1CSR1
1100        andi.   r3,r3,L1CSR1_ICE
1101        blr
1102
1103        .globl  dcache_enable
1104dcache_enable:
1105        mflr    r8
1106        bl      invalidate_dcache
1107        mtlr    r8
1108        isync
1109        mfspr   r0,L1CSR0
1110        ori     r0,r0,0x0001
1111        oris    r0,r0,0x0001
1112        msync
1113        isync
1114        mtspr   L1CSR0,r0
1115        isync
1116        blr
1117
1118        .globl  dcache_disable
1119dcache_disable:
1120        mfspr   r3,L1CSR0
1121        lis     r4,0
1122        ori     r4,r4,L1CSR0_DCE
1123        andc    r3,r3,r4
1124        mtspr   L1CSR0,r3
1125        isync
1126        blr
1127
1128        .globl  dcache_status
1129dcache_status:
1130        mfspr   r3,L1CSR0
1131        andi.   r3,r3,L1CSR0_DCE
1132        blr
1133
1134        .globl get_pir
1135get_pir:
1136        mfspr   r3,PIR
1137        blr
1138
1139        .globl get_pvr
1140get_pvr:
1141        mfspr   r3,PVR
1142        blr
1143
1144        .globl get_svr
1145get_svr:
1146        mfspr   r3,SVR
1147        blr
1148
1149        .globl wr_tcr
1150wr_tcr:
1151        mtspr   TCR,r3
1152        blr
1153
1154/*------------------------------------------------------------------------------- */
1155/* Function:     in8 */
1156/* Description:  Input 8 bits */
1157/*------------------------------------------------------------------------------- */
1158        .globl  in8
1159in8:
1160        lbz     r3,0x0000(r3)
1161        blr
1162
1163/*------------------------------------------------------------------------------- */
1164/* Function:     out8 */
1165/* Description:  Output 8 bits */
1166/*------------------------------------------------------------------------------- */
1167        .globl  out8
1168out8:
1169        stb     r4,0x0000(r3)
1170        sync
1171        blr
1172
1173/*------------------------------------------------------------------------------- */
1174/* Function:     out16 */
1175/* Description:  Output 16 bits */
1176/*------------------------------------------------------------------------------- */
1177        .globl  out16
1178out16:
1179        sth     r4,0x0000(r3)
1180        sync
1181        blr
1182
1183/*------------------------------------------------------------------------------- */
1184/* Function:     out16r */
1185/* Description:  Byte reverse and output 16 bits */
1186/*------------------------------------------------------------------------------- */
1187        .globl  out16r
1188out16r:
1189        sthbrx  r4,r0,r3
1190        sync
1191        blr
1192
1193/*------------------------------------------------------------------------------- */
1194/* Function:     out32 */
1195/* Description:  Output 32 bits */
1196/*------------------------------------------------------------------------------- */
1197        .globl  out32
1198out32:
1199        stw     r4,0x0000(r3)
1200        sync
1201        blr
1202
1203/*------------------------------------------------------------------------------- */
1204/* Function:     out32r */
1205/* Description:  Byte reverse and output 32 bits */
1206/*------------------------------------------------------------------------------- */
1207        .globl  out32r
1208out32r:
1209        stwbrx  r4,r0,r3
1210        sync
1211        blr
1212
1213/*------------------------------------------------------------------------------- */
1214/* Function:     in16 */
1215/* Description:  Input 16 bits */
1216/*------------------------------------------------------------------------------- */
1217        .globl  in16
1218in16:
1219        lhz     r3,0x0000(r3)
1220        blr
1221
1222/*------------------------------------------------------------------------------- */
1223/* Function:     in16r */
1224/* Description:  Input 16 bits and byte reverse */
1225/*------------------------------------------------------------------------------- */
1226        .globl  in16r
1227in16r:
1228        lhbrx   r3,r0,r3
1229        blr
1230
1231/*------------------------------------------------------------------------------- */
1232/* Function:     in32 */
1233/* Description:  Input 32 bits */
1234/*------------------------------------------------------------------------------- */
1235        .globl  in32
1236in32:
1237        lwz     r3,0x0000(r3)
1238        blr
1239
1240/*------------------------------------------------------------------------------- */
1241/* Function:     in32r */
1242/* Description:  Input 32 bits and byte reverse */
1243/*------------------------------------------------------------------------------- */
1244        .globl  in32r
1245in32r:
1246        lwbrx   r3,r0,r3
1247        blr
1248#endif  /* !CONFIG_NAND_SPL */
1249
1250/*------------------------------------------------------------------------------*/
1251
1252/*
1253 * void write_tlb(mas0, mas1, mas2, mas3, mas7)
1254 */
1255        .globl  write_tlb
1256write_tlb:
1257        mtspr   MAS0,r3
1258        mtspr   MAS1,r4
1259        mtspr   MAS2,r5
1260        mtspr   MAS3,r6
1261#ifdef CONFIG_ENABLE_36BIT_PHYS
1262        mtspr   MAS7,r7
1263#endif
1264        li      r3,0
1265#ifdef CONFIG_SYS_BOOK3E_HV
1266        mtspr   MAS8,r3
1267#endif
1268        isync
1269        tlbwe
1270        msync
1271        isync
1272        blr
1273
1274/*
1275 * void relocate_code (addr_sp, gd, addr_moni)
1276 *
1277 * This "function" does not return, instead it continues in RAM
1278 * after relocating the monitor code.
1279 *
1280 * r3 = dest
1281 * r4 = src
1282 * r5 = length in bytes
1283 * r6 = cachelinesize
1284 */
1285        .globl  relocate_code
1286relocate_code:
1287        mr      r1,r3           /* Set new stack pointer                */
1288        mr      r9,r4           /* Save copy of Init Data pointer       */
1289        mr      r10,r5          /* Save copy of Destination Address     */
1290
1291        GET_GOT
1292        mr      r3,r5                           /* Destination Address  */
1293        lis     r4,CONFIG_SYS_MONITOR_BASE@h            /* Source      Address  */
1294        ori     r4,r4,CONFIG_SYS_MONITOR_BASE@l
1295        lwz     r5,GOT(__init_end)
1296        sub     r5,r5,r4
1297        li      r6,CONFIG_SYS_CACHELINE_SIZE            /* Cache Line Size      */
1298
1299        /*
1300         * Fix GOT pointer:
1301         *
1302         * New GOT-PTR = (old GOT-PTR - CONFIG_SYS_MONITOR_BASE) + Destination Address
1303         *
1304         * Offset:
1305         */
1306        sub     r15,r10,r4
1307
1308        /* First our own GOT */
1309        add     r12,r12,r15
1310        /* then the one used by the C code */
1311        add     r30,r30,r15
1312
1313        /*
1314         * Now relocate code
1315         */
1316
1317        cmplw   cr1,r3,r4
1318        addi    r0,r5,3
1319        srwi.   r0,r0,2
1320        beq     cr1,4f          /* In place copy is not necessary       */
1321        beq     7f              /* Protect against 0 count              */
1322        mtctr   r0
1323        bge     cr1,2f
1324
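            /* Destination is below the source: copy forward, one word at a time */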
1325        la      r8,-4(r4)
1326        la      r7,-4(r3)
13271:      lwzu    r0,4(r8)
1328        stwu    r0,4(r7)
1329        bdnz    1b
1330        b       4f
1331
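            /* Destination is above the source: copy backward so unread source words
             * are not overwritten */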
13322:      slwi    r0,r0,2
1333        add     r8,r4,r0
1334        add     r7,r3,r0
13353:      lwzu    r0,-4(r8)
1336        stwu    r0,-4(r7)
1337        bdnz    3b
1338
1339/*
1340 * Now flush the cache: note that we must start from a cache aligned
1341 * address. Otherwise we might miss one cache line.
1342 */
13434:      cmpwi   r6,0
1344        add     r5,r3,r5
1345        beq     7f              /* Always flush prefetch queue in any case */
1346        subi    r0,r6,1
1347        andc    r3,r3,r0
1348        mr      r4,r3
13495:      dcbst   0,r4
1350        add     r4,r4,r6
1351        cmplw   r4,r5
1352        blt     5b
1353        sync                    /* Wait for all dcbst to complete on bus */
1354        mr      r4,r3
13556:      icbi    0,r4
1356        add     r4,r4,r6
1357        cmplw   r4,r5
1358        blt     6b
13597:      sync                    /* Wait for all icbi to complete on bus */
1360        isync
1361
1362        /*
1363         * Re-point the IVPR at RAM
1364         */
1365        mtspr   IVPR,r10
1366
1367/*
1368 * We are done. Do not return, instead branch to second part of board
1369 * initialization, now running from RAM.
1370 */
1371
1372        addi    r0,r10,in_ram - _start + _START_OFFSET
1373        mtlr    r0
1374        blr                             /* NEVER RETURNS! */
1375        .globl  in_ram
1376in_ram:
1377
1378        /*
1379         * Relocation function: r12 points to got2+0x8000.
1380         *
1381         * Adjust the got2 pointers; there is no need to check for 0 because
1382         * this code already puts a few entries in the table.
1383         */
1384        li      r0,__got2_entries@sectoff@l
1385        la      r3,GOT(_GOT2_TABLE_)
1386        lwz     r11,GOT(_GOT2_TABLE_)
1387        mtctr   r0
1388        sub     r11,r3,r11
1389        addi    r3,r3,-4
13901:      lwzu    r0,4(r3)
1391        cmpwi   r0,0
1392        beq-    2f
1393        add     r0,r0,r11
1394        stw     r0,0(r3)
13952:      bdnz    1b
1396
1397        /*
1398         * Now adjust the fixups and the pointers to the fixups
1399         * in case we need to move ourselves again.
1400         */
1401        li      r0,__fixup_entries@sectoff@l
1402        lwz     r3,GOT(_FIXUP_TABLE_)
1403        cmpwi   r0,0
1404        mtctr   r0
1405        addi    r3,r3,-4
1406        beq     4f
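            /* Each fixup entry holds the address of a word to relocate: update the
             * table entry itself, then add the relocation offset to the word it
             * points to (unless that word is zero) */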
14073:      lwzu    r4,4(r3)
1408        lwzux   r0,r4,r11
1409        cmpwi   r0,0
1410        add     r0,r0,r11
1411        stw     r4,0(r3)
1412        beq-    5f
1413        stw     r0,0(r4)
14145:      bdnz    3b
14154:
1416clear_bss:
1417        /*
1418         * Now clear BSS segment
1419         */
1420        lwz     r3,GOT(__bss_start)
1421        lwz     r4,GOT(__bss_end__)
1422
1423        cmplw   0,r3,r4
1424        beq     6f
1425
1426        li      r0,0
14275:
1428        stw     r0,0(r3)
1429        addi    r3,r3,4
1430        cmplw   0,r3,r4
1431        bne     5b
14326:
1433
1434        mr      r3,r9           /* Init Data pointer            */
1435        mr      r4,r10          /* Destination Address          */
1436        bl      board_init_r
1437
1438#ifndef CONFIG_NAND_SPL
1439        /*
1440         * Copy exception vector code to low memory
1441         *
1442         * r3: dest_addr
1443         * r7: source address, r8: end address, r9: target address
1444         */
1445        .globl  trap_init
1446trap_init:
1447        mflr    r4                      /* save link register           */
1448        GET_GOT
1449        lwz     r7,GOT(_start_of_vectors)
1450        lwz     r8,GOT(_end_of_vectors)
1451
1452        li      r9,0x100                /* reset vector always at 0x100 */
1453
1454        cmplw   0,r7,r8
1455        bgelr                           /* return if r7>=r8 - just in case */
14561:
1457        lwz     r0,0(r7)
1458        stw     r0,0(r9)
1459        addi    r7,r7,4
1460        addi    r9,r9,4
1461        cmplw   0,r7,r8
1462        bne     1b
1463
1464        /*
1465         * relocate `hdlr' and `int_return' entries
1466         */
1467        li      r7,.L_CriticalInput - _start + _START_OFFSET
1468        bl      trap_reloc
1469        li      r7,.L_MachineCheck - _start + _START_OFFSET
1470        bl      trap_reloc
1471        li      r7,.L_DataStorage - _start + _START_OFFSET
1472        bl      trap_reloc
1473        li      r7,.L_InstStorage - _start + _START_OFFSET
1474        bl      trap_reloc
1475        li      r7,.L_ExtInterrupt - _start + _START_OFFSET
1476        bl      trap_reloc
1477        li      r7,.L_Alignment - _start + _START_OFFSET
1478        bl      trap_reloc
1479        li      r7,.L_ProgramCheck - _start + _START_OFFSET
1480        bl      trap_reloc
1481        li      r7,.L_FPUnavailable - _start + _START_OFFSET
1482        bl      trap_reloc
1483        li      r7,.L_Decrementer - _start + _START_OFFSET
1484        bl      trap_reloc
1485        li      r7,.L_IntervalTimer - _start + _START_OFFSET
1486        li      r8,_end_of_vectors - _start + _START_OFFSET
14872:
1488        bl      trap_reloc
1489        addi    r7,r7,0x100             /* next exception vector        */
1490        cmplw   0,r7,r8
1491        blt     2b
1492
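            /* Point IVPR at 0x0, the base of the vectors just copied to low memory */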
1493        lis     r7,0x0
1494        mtspr   IVPR,r7
1495
1496        mtlr    r4                      /* restore link register        */
1497        blr
1498
1499.globl unlock_ram_in_cache
1500unlock_ram_in_cache:
1501        /* invalidate the INIT_RAM section */
1502        lis     r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@h
1503        ori     r3,r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@l
1504        mfspr   r4,L1CFG0
1505        andi.   r4,r4,0x1ff
1506        slwi    r4,r4,(10 - 1 - L1_CACHE_SHIFT)
1507        mtctr   r4
15081:      dcbi    r0,r3
1509        addi    r3,r3,CONFIG_SYS_CACHELINE_SIZE
1510        bdnz    1b
1511        sync
1512
1513        /* Invalidate the TLB entries for the cache */
1514        lis     r3,CONFIG_SYS_INIT_RAM_ADDR@h
1515        ori     r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
1516        tlbivax 0,r3
1517        addi    r3,r3,0x1000
1518        tlbivax 0,r3
1519        addi    r3,r3,0x1000
1520        tlbivax 0,r3
1521        addi    r3,r3,0x1000
1522        tlbivax 0,r3
1523        isync
1524        blr
1525
1526.globl flush_dcache
1527flush_dcache:
1528        mfspr   r3,SPRN_L1CFG0
1529
1530        rlwinm  r5,r3,9,3       /* Extract cache block size */
1531        twlgti  r5,1            /* Only 32 and 64 byte cache blocks
1532                                 * are currently defined.
1533                                 */
1534        li      r4,32
1535        subfic  r6,r5,2         /* r6 = log2(1KiB / cache block size) -
1536                                 *      log2(number of ways)
1537                                 */
1538        slw     r5,r4,r5        /* r5 = cache block size */
1539
1540        rlwinm  r7,r3,0,0xff    /* Extract number of KiB in the cache */
1541        mulli   r7,r7,13        /* An 8-way cache will require 13
1542                                 * loads per set.
1543                                 */
1544        slw     r7,r7,r6
1545
1546        /* save off HID0 and set DCFA */
1547        mfspr   r8,SPRN_HID0
1548        ori     r9,r8,HID0_DCFA@l
1549        mtspr   SPRN_HID0,r9
1550        isync
1551
1552        lis     r4,0
1553        mtctr   r7
1554
15551:      lwz     r3,0(r4)        /* Load... */
1556        add     r4,r4,r5
1557        bdnz    1b
1558
1559        msync
1560        lis     r4,0
1561        mtctr   r7
1562
15631:      dcbf    0,r4            /* ...and flush. */
1564        add     r4,r4,r5
1565        bdnz    1b
1566
1567        /* restore HID0 */
1568        mtspr   SPRN_HID0,r8
1569        isync
1570
1571        blr
1572
1573.globl setup_ivors
1574setup_ivors:
1575
1576#include "fixed_ivor.S"
1577        blr
1578#endif /* !CONFIG_NAND_SPL */
1579