uboot/arch/powerpc/cpu/mpc5xxx/start.S
/*
 *  Copyright (C) 1998  Dan Malek <dmalek@jlc.net>
 *  Copyright (C) 1999  Magnus Damm <kieraypc01.p.y.kie.era.ericsson.se>
 *  Copyright (C) 2000 - 2003 Wolfgang Denk <wd@denx.de>
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

/*
 *  U-Boot - Startup Code for MPC5xxx CPUs
 */
#include <asm-offsets.h>
#include <config.h>
#include <mpc5xxx.h>
#include <version.h>

#define CONFIG_MPC5xxx 1        /* needed for Linux kernel header files */
#define _LINUX_CONFIG_H 1       /* avoid reading Linux autoconf.h file  */

#include <ppc_asm.tmpl>
#include <ppc_defs.h>

#include <asm/cache.h>
#include <asm/mmu.h>
#include <asm/u-boot.h>

/* We don't want the MMU yet. */
#undef  MSR_KERNEL
/* Floating Point enable, Machine Check and Recoverable Interr. */
#ifdef DEBUG
#define MSR_KERNEL (MSR_FP|MSR_RI)
#else
#define MSR_KERNEL (MSR_FP|MSR_ME|MSR_RI)
#endif
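/*
 * Note: with DEBUG set, MSR_ME (machine check enable) is deliberately
 * left out so that a checkstop condition halts the CPU for the debugger
 * (see the comment at the 0x1300 breakpoint vector below).
 */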

/*
 * Set up GOT: Global Offset Table
 *
 * Use r12 to access the GOT
 */
        START_GOT
        GOT_ENTRY(_GOT2_TABLE_)
        GOT_ENTRY(_FIXUP_TABLE_)

        GOT_ENTRY(_start)
        GOT_ENTRY(_start_of_vectors)
        GOT_ENTRY(_end_of_vectors)
        GOT_ENTRY(transfer_to_handler)

        GOT_ENTRY(__init_end)
        GOT_ENTRY(__bss_end__)
        GOT_ENTRY(__bss_start)
        END_GOT
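/*
 * Each GOT_ENTRY(x) above is later read with "lwz rN, GOT(x)" relative
 * to r12 (set up by GET_GOT), so symbol addresses remain usable both
 * before and after the code is relocated to RAM.
 */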

/*
 * Version string
 */
        .data
        .globl  version_string
version_string:
        .ascii U_BOOT_VERSION_STRING, "\0"

/*
 * Exception vectors
 */
        .text
        . = EXC_OFF_SYS_RESET
        .globl  _start
_start:
        mfmsr   r5                      /* save msr contents            */

        /* Move CSBoot and adjust instruction pointer                   */
        /*--------------------------------------------------------------*/

#if defined(CONFIG_SYS_LOWBOOT)
# if defined(CONFIG_SYS_RAMBOOT)
#  error CONFIG_SYS_LOWBOOT is incompatible with CONFIG_SYS_RAMBOOT
# endif /* CONFIG_SYS_RAMBOOT */
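        /*
         * With CONFIG_SYS_LOWBOOT we start executing in the low boot
         * address space, while the image was linked for the regular
         * flash window. Map CS0 to the configured BOOTCS range, jump to
         * the link-time address, then hand the window over to the Boot
         * chip select and disable CS0 again.
         */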
        lis     r4, CONFIG_SYS_DEFAULT_MBAR@h
        lis     r3,     START_REG(CONFIG_SYS_BOOTCS_START)@h
        ori     r3, r3, START_REG(CONFIG_SYS_BOOTCS_START)@l
        stw     r3, 0x4(r4)             /* CS0 start */
        lis     r3,     STOP_REG(CONFIG_SYS_BOOTCS_START, CONFIG_SYS_BOOTCS_SIZE)@h
        ori     r3, r3, STOP_REG(CONFIG_SYS_BOOTCS_START, CONFIG_SYS_BOOTCS_SIZE)@l
        stw     r3, 0x8(r4)             /* CS0 stop */
        lis     r3,     0x02010000@h
        ori     r3, r3, 0x02010000@l
        stw     r3, 0x54(r4)            /* CS0 and Boot enable */

        lis     r3,     lowboot_reentry@h       /* jump from bootlow address space (0x0000xxxx) */
        ori     r3, r3, lowboot_reentry@l       /* to the address space the linker used */
        mtlr    r3
        blr

lowboot_reentry:
        lis     r3,     START_REG(CONFIG_SYS_BOOTCS_START)@h
        ori     r3, r3, START_REG(CONFIG_SYS_BOOTCS_START)@l
        stw     r3, 0x4c(r4)            /* Boot start */
        lis     r3,     STOP_REG(CONFIG_SYS_BOOTCS_START, CONFIG_SYS_BOOTCS_SIZE)@h
        ori     r3, r3, STOP_REG(CONFIG_SYS_BOOTCS_START, CONFIG_SYS_BOOTCS_SIZE)@l
        stw     r3, 0x50(r4)            /* Boot stop */
        lis     r3,     0x02000001@h
        ori     r3, r3, 0x02000001@l
        stw     r3, 0x54(r4)            /* Boot enable, CS0 disable */
#endif  /* CONFIG_SYS_LOWBOOT */

#if defined(CONFIG_SYS_DEFAULT_MBAR) && !defined(CONFIG_SYS_RAMBOOT)
        lis     r3, CONFIG_SYS_MBAR@h
        ori     r3, r3, CONFIG_SYS_MBAR@l
        /* MBAR is mirrored into the MBAR SPR */
        mtspr   MBAR,r3
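        /*
         * rlwinm moves the upper 16 address bits of the new base into
         * the low halfword; the memory-mapped MBAR register (at offset
         * 0 of the current, default module base) expects the new base
         * address in that form.
         */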
        rlwinm  r3, r3, 16, 16, 31
        lis     r4, CONFIG_SYS_DEFAULT_MBAR@h
        stw     r3, 0(r4)
#endif /* CONFIG_SYS_DEFAULT_MBAR */

        /* Initialise the MPC5xxx processor core                        */
        /*--------------------------------------------------------------*/

        bl      init_5xxx_core

        /* initialize some things that are hard to access from C        */
        /*--------------------------------------------------------------*/

        /* set up stack in on-chip SRAM */
        lis     r3, CONFIG_SYS_INIT_RAM_ADDR@h
        ori     r3, r3, CONFIG_SYS_INIT_RAM_ADDR@l
        ori     r1, r3, CONFIG_SYS_INIT_SP_OFFSET
        li      r0, 0                   /* Make room for stack frame header and */
        stwu    r0, -4(r1)              /* clear final stack frame so that      */
        stwu    r0, -4(r1)              /* stack backtraces terminate cleanly   */

        /* let the C-code set up the rest                               */
        /*                                                              */
        /* Be careful to keep code relocatable !                        */
        /*--------------------------------------------------------------*/

        GET_GOT                 /* initialize GOT access                */

        /* r3: IMMR */
        bl      cpu_init_f      /* run low-level CPU init code (in Flash)*/

        bl      board_init_f    /* run 1st part of board init code (in Flash)*/

        /* NOTREACHED - board_init_f() does not return */

/*
 * Vector Table
 */

        .globl  _start_of_vectors
_start_of_vectors:

/* Machine check */
        STD_EXCEPTION(0x200, MachineCheck, MachineCheckException)

/* Data Storage exception. */
        STD_EXCEPTION(0x300, DataStorage, UnknownException)

/* Instruction Storage exception. */
        STD_EXCEPTION(0x400, InstStorage, UnknownException)

/* External Interrupt exception. */
        STD_EXCEPTION(0x500, ExtInterrupt, external_interrupt)

/* Alignment exception. */
        . = 0x600
Alignment:
        EXCEPTION_PROLOG(SRR0, SRR1)
        mfspr   r4,DAR
        stw     r4,_DAR(r21)
        mfspr   r5,DSISR
        stw     r5,_DSISR(r21)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        EXC_XFER_TEMPLATE(Alignment, AlignmentException, MSR_KERNEL, COPY_EE)

/* Program check exception */
        . = 0x700
ProgramCheck:
        EXCEPTION_PROLOG(SRR0, SRR1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        EXC_XFER_TEMPLATE(ProgramCheck, ProgramCheckException,
                MSR_KERNEL, COPY_EE)

        STD_EXCEPTION(0x800, FPUnavailable, UnknownException)

        /* I guess we could implement decrementer, and may have
         * to someday for timekeeping.
         */
        STD_EXCEPTION(0x900, Decrementer, timer_interrupt)

        STD_EXCEPTION(0xa00, Trap_0a, UnknownException)
        STD_EXCEPTION(0xb00, Trap_0b, UnknownException)
        STD_EXCEPTION(0xc00, SystemCall, UnknownException)
        STD_EXCEPTION(0xd00, SingleStep, UnknownException)

        STD_EXCEPTION(0xe00, Trap_0e, UnknownException)
        STD_EXCEPTION(0xf00, Trap_0f, UnknownException)

        STD_EXCEPTION(0x1000, InstructionTLBMiss, UnknownException)
        STD_EXCEPTION(0x1100, DataLoadTLBMiss, UnknownException)
        STD_EXCEPTION(0x1200, DataStoreTLBMiss, UnknownException)
#ifdef DEBUG
        . = 0x1300
        /*
         * This exception occurs when the program counter matches the
         * Instruction Address Breakpoint Register (IABR).
         *
         * I want the cpu to halt if this occurs so I can hunt around
         * with the debugger and look at things.
         *
         * When DEBUG is defined, both machine check enable (in the MSR)
         * and checkstop reset enable (in the reset mode register) are
         * turned off and so a checkstop condition will result in the cpu
         * halting.
         *
         * I force the cpu into a checkstop condition by putting an illegal
         * instruction here (at least this is the theory).
         *
         * Well - that didn't work, so just do an infinite loop!
         */
1:      b       1b
#else
        STD_EXCEPTION(0x1300, InstructionBreakpoint, DebugException)
#endif
        STD_EXCEPTION(0x1400, SMI, UnknownException)

        STD_EXCEPTION(0x1500, Trap_15, UnknownException)
        STD_EXCEPTION(0x1600, Trap_16, UnknownException)
        STD_EXCEPTION(0x1700, Trap_17, UnknownException)
        STD_EXCEPTION(0x1800, Trap_18, UnknownException)
        STD_EXCEPTION(0x1900, Trap_19, UnknownException)
        STD_EXCEPTION(0x1a00, Trap_1a, UnknownException)
        STD_EXCEPTION(0x1b00, Trap_1b, UnknownException)
        STD_EXCEPTION(0x1c00, Trap_1c, UnknownException)
        STD_EXCEPTION(0x1d00, Trap_1d, UnknownException)
        STD_EXCEPTION(0x1e00, Trap_1e, UnknownException)
        STD_EXCEPTION(0x1f00, Trap_1f, UnknownException)
        STD_EXCEPTION(0x2000, Trap_20, UnknownException)
        STD_EXCEPTION(0x2100, Trap_21, UnknownException)
        STD_EXCEPTION(0x2200, Trap_22, UnknownException)
        STD_EXCEPTION(0x2300, Trap_23, UnknownException)
        STD_EXCEPTION(0x2400, Trap_24, UnknownException)
        STD_EXCEPTION(0x2500, Trap_25, UnknownException)
        STD_EXCEPTION(0x2600, Trap_26, UnknownException)
        STD_EXCEPTION(0x2700, Trap_27, UnknownException)
        STD_EXCEPTION(0x2800, Trap_28, UnknownException)
        STD_EXCEPTION(0x2900, Trap_29, UnknownException)
        STD_EXCEPTION(0x2a00, Trap_2a, UnknownException)
        STD_EXCEPTION(0x2b00, Trap_2b, UnknownException)
        STD_EXCEPTION(0x2c00, Trap_2c, UnknownException)
        STD_EXCEPTION(0x2d00, Trap_2d, UnknownException)
        STD_EXCEPTION(0x2e00, Trap_2e, UnknownException)
        STD_EXCEPTION(0x2f00, Trap_2f, UnknownException)


        .globl  _end_of_vectors
_end_of_vectors:

        . = 0x3000

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception.
 * Register r21 is pointer into trap frame, r1 has new stack pointer.
 */
        .globl  transfer_to_handler
transfer_to_handler:
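        /*
         * On entry (set up by the EXCEPTION_PROLOG / EXC_XFER_TEMPLATE
         * macros): r22 = saved SRR0 (interrupted NIP), r23 = saved SRR1
         * (interrupted MSR), r20 = MSR value to run the handler with,
         * and LR points at a two-word table holding the handler address
         * and the address to continue at when done (int_return).
         */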
        stw     r22,_NIP(r21)
        lis     r22,MSR_POW@h
        andc    r23,r23,r22
        stw     r23,_MSR(r21)
        SAVE_GPR(7, r21)
        SAVE_4GPRS(8, r21)
        SAVE_8GPRS(12, r21)
        SAVE_8GPRS(24, r21)
        mflr    r23
        andi.   r24,r23,0x3f00          /* get vector offset */
        stw     r24,TRAP(r21)
        li      r22,0
        stw     r22,RESULT(r21)
        lwz     r24,0(r23)              /* virtual address of handler */
        lwz     r23,4(r23)              /* where to go when done */
        mtspr   SRR0,r24
        mtspr   SRR1,r20
        mtlr    r23
        SYNC
        rfi                             /* jump to handler, enable MMU */

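/*
 * Common return path for the exception handlers: disable interrupts,
 * restore the context saved in the exception frame and rfi back to
 * the interrupted code.
 */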
int_return:
        mfmsr   r28             /* Disable interrupts */
        li      r4,0
        ori     r4,r4,MSR_EE
        andc    r28,r28,r4
        SYNC                    /* Some chip revs need this... */
        mtmsr   r28
        SYNC
        lwz     r2,_CTR(r1)
        lwz     r0,_LINK(r1)
        mtctr   r2
        mtlr    r0
        lwz     r2,_XER(r1)
        lwz     r0,_CCR(r1)
        mtspr   XER,r2
        mtcrf   0xFF,r0
        REST_10GPRS(3, r1)
        REST_10GPRS(13, r1)
        REST_8GPRS(23, r1)
        REST_GPR(31, r1)
        lwz     r2,_NIP(r1)     /* Restore environment */
        lwz     r0,_MSR(r1)
        mtspr   SRR0,r2
        mtspr   SRR1,r0
        lwz     r0,GPR0(r1)
        lwz     r2,GPR2(r1)
        lwz     r1,GPR1(r1)
        SYNC
        rfi

/*
 * This code initialises the MPC5xxx processor core
 * (conforms to PowerPC 603e spec)
 * Note: expects original MSR contents to be in r5.
 */

        .globl  init_5xxx_core
init_5xxx_core:

        /* Initialize machine status; enable machine check interrupt    */
        /*--------------------------------------------------------------*/

        li      r3, MSR_KERNEL          /* Set ME and RI flags */
        rlwimi  r3, r5, 0, 25, 25       /* preserve IP bit set by HRCW */
#ifdef DEBUG
        rlwimi  r3, r5, 0, 21, 22       /* debugger might set SE & BE bits */
#endif
        SYNC                            /* Some chip revs need this... */
        mtmsr   r3
        SYNC
        mtspr   SRR1, r3                /* Make SRR1 match MSR */

        /* Initialize the Hardware Implementation-dependent Registers   */
        /* HID0 also contains cache control                             */
        /*--------------------------------------------------------------*/

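        /*
         * CONFIG_SYS_HID0_INIT and CONFIG_SYS_HID0_FINAL come from the
         * board configuration; typically _INIT also sets the cache
         * invalidate bits while _FINAL holds the desired run-time value.
         */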
        lis     r3, CONFIG_SYS_HID0_INIT@h
        ori     r3, r3, CONFIG_SYS_HID0_INIT@l
        SYNC
        mtspr   HID0, r3

        lis     r3, CONFIG_SYS_HID0_FINAL@h
        ori     r3, r3, CONFIG_SYS_HID0_FINAL@l
        SYNC
        mtspr   HID0, r3

        /* clear all BAT's                                              */
        /*--------------------------------------------------------------*/

        li      r0, 0
        mtspr   DBAT0U, r0
        mtspr   DBAT0L, r0
        mtspr   DBAT1U, r0
        mtspr   DBAT1L, r0
        mtspr   DBAT2U, r0
        mtspr   DBAT2L, r0
        mtspr   DBAT3U, r0
        mtspr   DBAT3L, r0
        mtspr   DBAT4U, r0
        mtspr   DBAT4L, r0
        mtspr   DBAT5U, r0
        mtspr   DBAT5L, r0
        mtspr   DBAT6U, r0
        mtspr   DBAT6L, r0
        mtspr   DBAT7U, r0
        mtspr   DBAT7L, r0
        mtspr   IBAT0U, r0
        mtspr   IBAT0L, r0
        mtspr   IBAT1U, r0
        mtspr   IBAT1L, r0
        mtspr   IBAT2U, r0
        mtspr   IBAT2L, r0
        mtspr   IBAT3U, r0
        mtspr   IBAT3L, r0
        mtspr   IBAT4U, r0
        mtspr   IBAT4L, r0
        mtspr   IBAT5U, r0
        mtspr   IBAT5L, r0
        mtspr   IBAT6U, r0
        mtspr   IBAT6L, r0
        mtspr   IBAT7U, r0
        mtspr   IBAT7L, r0
        SYNC

        /* invalidate all tlb's                                         */
        /*                                                              */
        /* From the 603e User Manual: "The 603e provides the ability to */
        /* invalidate a TLB entry. The TLB Invalidate Entry (tlbie)     */
        /* instruction invalidates the TLB entry indexed by the EA, and */
        /* operates on both the instruction and data TLBs simultaneously*/
        /* invalidating four TLB entries (both sets in each TLB). The   */
        /* index corresponds to bits 15-19 of the EA. To invalidate all */
        /* entries within both TLBs, 32 tlbie instructions should be    */
        /* issued, incrementing this field by one each time."           */
        /*                                                              */
        /* "Note that the tlbia instruction is not implemented on the   */
        /* 603e."                                                       */
        /*                                                              */
        /* bits 15-19 correspond to addresses 0x00000000 to 0x0001F000  */
        /* incrementing by 0x1000 each time. The code below is sort of  */
        /* based on code in "flush_tlbs" from arch/powerpc/kernel/head.S        */
        /*                                                              */
        /*--------------------------------------------------------------*/

        li      r3, 32
        mtctr   r3
        li      r3, 0
1:      tlbie   r3
        addi    r3, r3, 0x1000
        bdnz    1b
        SYNC

        /* Done!                                                        */
        /*--------------------------------------------------------------*/

        blr

/* Cache functions.
 *
 * Note: requires that all cache bits in
 * HID0 are in the low half word.
 */
        .globl  icache_enable
icache_enable:
        mfspr   r3, HID0
        ori     r3, r3, HID0_ICE
        lis     r4, 0
        ori     r4, r4, HID0_ILOCK
        andc    r3, r3, r4
        ori     r4, r3, HID0_ICFI
        isync
        mtspr   HID0, r4        /* sets enable and invalidate, clears lock */
        isync
        mtspr   HID0, r3        /* clears invalidate */
        blr

        .globl  icache_disable
icache_disable:
        mfspr   r3, HID0
        lis     r4, 0
        ori     r4, r4, HID0_ICE|HID0_ILOCK
        andc    r3, r3, r4
        ori     r4, r3, HID0_ICFI
        isync
        mtspr   HID0, r4        /* sets invalidate, clears enable and lock */
        isync
        mtspr   HID0, r3        /* clears invalidate */
        blr

        .globl  icache_status
icache_status:
        mfspr   r3, HID0
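        /* rotate the ICE bit into the least-significant bit and mask
         * off everything else, so the result is 0 or 1 */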
        rlwinm  r3, r3, HID0_ICE_BITPOS + 1, 31, 31
        blr

        .globl  dcache_enable
dcache_enable:
        mfspr   r3, HID0
        ori     r3, r3, HID0_DCE
        lis     r4, 0
        ori     r4, r4, HID0_DLOCK
        andc    r3, r3, r4
        ori     r4, r3, HID0_DCI
        sync
        mtspr   HID0, r4        /* sets enable and invalidate, clears lock */
        sync
        mtspr   HID0, r3        /* clears invalidate */
        blr

        .globl  dcache_disable
dcache_disable:
        mfspr   r3, HID0
        lis     r4, 0
        ori     r4, r4, HID0_DCE|HID0_DLOCK
        andc    r3, r3, r4
        ori     r4, r3, HID0_DCI
        sync
        mtspr   HID0, r4        /* sets invalidate, clears enable and lock */
        sync
        mtspr   HID0, r3        /* clears invalidate */
        blr

        .globl  dcache_status
dcache_status:
        mfspr   r3, HID0
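        /* same trick as icache_status, using the DCE bit */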
        rlwinm  r3, r3, HID0_DCE_BITPOS + 1, 31, 31
        blr

        .globl get_svr
get_svr:
        mfspr   r3, SVR
        blr

        .globl get_pvr
get_pvr:
        mfspr   r3, PVR
        blr

/*------------------------------------------------------------------------------*/

/*
 * void relocate_code (addr_sp, gd, addr_moni)
 *
 * This "function" does not return, instead it continues in RAM
 * after relocating the monitor code.
 *
 * On entry: r3 = addr_sp (new stack pointer), r4 = gd, r5 = addr_moni.
 * The copy loop below then works with:
 * r3 = dest
 * r4 = src
 * r5 = length in bytes
 * r6 = cachelinesize
 */
        .globl  relocate_code
relocate_code:
        mr      r1,  r3         /* Set new stack pointer                */
        mr      r9,  r4         /* Save copy of Global Data pointer     */
        mr      r10, r5         /* Save copy of Destination Address     */

        GET_GOT
        mr      r3,  r5                         /* Destination Address  */
        lis     r4, CONFIG_SYS_MONITOR_BASE@h           /* Source      Address  */
        ori     r4, r4, CONFIG_SYS_MONITOR_BASE@l
        lwz     r5, GOT(__init_end)
        sub     r5, r5, r4
        li      r6, CONFIG_SYS_CACHELINE_SIZE           /* Cache Line Size      */

        /*
         * Fix GOT pointer:
         *
         * New GOT-PTR = (old GOT-PTR - CONFIG_SYS_MONITOR_BASE) + Destination Address
         *
         * Offset:
         */
        sub     r15, r10, r4

        /* First our own GOT */
        add     r12, r12, r15
        /* then the one used by the C code */
        add     r30, r30, r15

        /*
         * Now relocate code
         */

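        /*
         * r3 = dest, r4 = src, r5 = byte count. Round the count up to
         * whole words, skip the copy when source and destination are
         * the same, and choose the copy direction so that overlapping
         * regions are copied safely (backwards when dest >= src).
         */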
        cmplw   cr1,r3,r4
        addi    r0,r5,3
        srwi.   r0,r0,2
        beq     cr1,4f          /* In place copy is not necessary       */
        beq     7f              /* Protect against 0 count              */
        mtctr   r0
        bge     cr1,2f

        la      r8,-4(r4)
        la      r7,-4(r3)
1:      lwzu    r0,4(r8)
        stwu    r0,4(r7)
        bdnz    1b
        b       4f

2:      slwi    r0,r0,2
        add     r8,r4,r0
        add     r7,r3,r0
3:      lwzu    r0,-4(r8)
        stwu    r0,-4(r7)
        bdnz    3b

/*
 * Now flush the cache: note that we must start from a cache aligned
 * address. Otherwise we might miss one cache line.
 */
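/*
 * dcbst pushes each copied line out of the data cache to memory; icbi
 * then invalidates the matching instruction cache lines so that the
 * relocated code is fetched from memory rather than from stale cache
 * contents. Either loop is skipped if the corresponding cache is off.
 */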
4:      cmpwi   r6,0
        add     r5,r3,r5
        beq     7f              /* Always flush prefetch queue in any case */
        subi    r0,r6,1
        andc    r3,r3,r0
        mfspr   r7,HID0         /* don't do dcbst if dcache is disabled */
        rlwinm  r7,r7,HID0_DCE_BITPOS+1,31,31
        cmpwi   r7,0
        beq     9f
        mr      r4,r3
5:      dcbst   0,r4
        add     r4,r4,r6
        cmplw   r4,r5
        blt     5b
        sync                    /* Wait for all dcbst to complete on bus */
9:      mfspr   r7,HID0         /* don't do icbi if icache is disabled */
        rlwinm  r7,r7,HID0_ICE_BITPOS+1,31,31
        cmpwi   r7,0
        beq     7f
        mr      r4,r3
6:      icbi    0,r4
        add     r4,r4,r6
        cmplw   r4,r5
        blt     6b
7:      sync                    /* Wait for all icbi to complete on bus */
        isync

/*
 * We are done. Do not return, instead branch to second part of board
 * initialization, now running from RAM.
 */

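        /*
         * r10 still holds the destination base address; in_ram - _start
         * + EXC_OFF_SYS_RESET is the offset of in_ram within the image,
         * so r0 ends up holding the RAM address of in_ram.
         */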
        addi    r0, r10, in_ram - _start + EXC_OFF_SYS_RESET
        mtlr    r0
        blr

in_ram:

        /*
         * Relocation Function, r12 points to got2+0x8000
         *
         * Adjust got2 pointers, no need to check for 0, this code
         * already puts a few entries in the table.
         */
        li      r0,__got2_entries@sectoff@l
        la      r3,GOT(_GOT2_TABLE_)
        lwz     r11,GOT(_GOT2_TABLE_)
        mtctr   r0
        sub     r11,r3,r11
        addi    r3,r3,-4
1:      lwzu    r0,4(r3)
        cmpwi   r0,0
        beq-    2f
        add     r0,r0,r11
        stw     r0,0(r3)
2:      bdnz    1b

        /*
         * Now adjust the fixups and the pointers to the fixups
         * in case we need to move ourselves again.
         */
        li      r0,__fixup_entries@sectoff@l
        lwz     r3,GOT(_FIXUP_TABLE_)
        cmpwi   r0,0
        mtctr   r0
        addi    r3,r3,-4
        beq     4f
3:      lwzu    r4,4(r3)
        lwzux   r0,r4,r11
        cmpwi   r0,0
        add     r0,r0,r11
        stw     r4,0(r3)
        beq-    5f
        stw     r0,0(r4)
5:      bdnz    3b
4:
clear_bss:
        /*
         * Now clear BSS segment
         */
        lwz     r3,GOT(__bss_start)
        lwz     r4,GOT(__bss_end__)

        cmplw   0, r3, r4
        beq     6f

        li      r0, 0
5:
        stw     r0, 0(r3)
        addi    r3, r3, 4
        cmplw   0, r3, r4
        bne     5b
6:

        mr      r3, r9          /* Global Data pointer          */
        mr      r4, r10         /* Destination Address          */
        bl      board_init_r

        /*
         * Copy exception vector code to low memory
         *
         * r3: dest_addr
         * r7: source address, r8: end address, r9: target address
         */
        .globl  trap_init
trap_init:
        mflr    r4                      /* save link register           */
        GET_GOT
        lwz     r7, GOT(_start)
        lwz     r8, GOT(_end_of_vectors)

        li      r9, 0x100               /* reset vector always at 0x100 */

        cmplw   0, r7, r8
        bgelr                           /* return if r7>=r8 - just in case */
1:
        lwz     r0, 0(r7)
        stw     r0, 0(r9)
        addi    r7, r7, 4
        addi    r9, r9, 4
        cmplw   0, r7, r8
        bne     1b

        /*
         * relocate `hdlr' and `int_return' entries
         */
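        /*
         * Each vector is followed by a two-word table holding the
         * handler address and the int_return address; trap_reloc
         * adjusts both words so they point into the relocated image.
         */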
        li      r7, .L_MachineCheck - _start + EXC_OFF_SYS_RESET
        li      r8, Alignment - _start + EXC_OFF_SYS_RESET
2:
        bl      trap_reloc
        addi    r7, r7, 0x100           /* next exception vector        */
        cmplw   0, r7, r8
        blt     2b

        li      r7, .L_Alignment - _start + EXC_OFF_SYS_RESET
        bl      trap_reloc

        li      r7, .L_ProgramCheck - _start + EXC_OFF_SYS_RESET
        bl      trap_reloc

        li      r7, .L_FPUnavailable - _start + EXC_OFF_SYS_RESET
        li      r8, SystemCall - _start + EXC_OFF_SYS_RESET
3:
        bl      trap_reloc
        addi    r7, r7, 0x100           /* next exception vector        */
        cmplw   0, r7, r8
        blt     3b

        li      r7, .L_SingleStep - _start + EXC_OFF_SYS_RESET
        li      r8, _end_of_vectors - _start + EXC_OFF_SYS_RESET
4:
        bl      trap_reloc
        addi    r7, r7, 0x100           /* next exception vector        */
        cmplw   0, r7, r8
        blt     4b

        mfmsr   r3                      /* now that the vectors have    */
        lis     r7, MSR_IP@h            /* relocated into low memory    */
        ori     r7, r7, MSR_IP@l        /* MSR[IP] can be turned off    */
        andc    r3, r3, r7              /* (if it was on)               */
        SYNC                            /* Some chip revs need this... */
        mtmsr   r3
        SYNC

        mtlr    r4                      /* restore link register    */
        blr
