/* uboot/arch/powerpc/cpu/mpc85xx/release.S */
   1/*
   2 * Copyright 2008-2012 Freescale Semiconductor, Inc.
   3 * Kumar Gala <kumar.gala@freescale.com>
   4 *
   5 * SPDX-License-Identifier:     GPL-2.0+
   6 */
   7
   8#include <asm-offsets.h>
   9#include <config.h>
  10#include <mpc85xx.h>
  11
  12#include <ppc_asm.tmpl>
  13#include <ppc_defs.h>
  14
  15#include <asm/cache.h>
  16#include <asm/mmu.h>
  17
  18/* To boot secondary cpus, we need a place for them to start up.
  19 * Normally, they start at 0xfffffffc, but that's usually the
  20 * firmware, and we don't want to have to run the firmware again.
  21 * Instead, the primary cpu will set the BPTR to point here to
  22 * this page.  We then set up the core, and head to
  23 * start_secondary.  Note that this means that the code below
  24 * must never exceed 1023 instructions (the branch at the end
  25 * would then be the 1024th).
  26 */
   27        .globl  __secondary_start_page
   28        .align  12
   29__secondary_start_page:
/*
 * Per-core low-level init. Each secondary core starts executing here out
 * of the 4KB boot page that the primary core points BPTR at (see the
 * comment at the top of this file). Only the default reset TLB mapping
 * of 0xfffff000..0xffffffff is available at this point.
 */
   30/* First do some preliminary setup */
   31        lis     r3, HID0_EMCP@h         /* enable machine check */
   32#ifndef CONFIG_E500MC
   33        ori     r3,r3,HID0_TBEN@l       /* enable Timebase */
   34#endif
   35#ifdef CONFIG_PHYS_64BIT
   36        ori     r3,r3,HID0_ENMAS7@l     /* enable MAS7 updates */
   37#endif
   38        mtspr   SPRN_HID0,r3
   39
   40#ifndef CONFIG_E500MC
   41        li      r3,(HID1_ASTME|HID1_ABE)@l      /* Addr streaming & broadcast */
        /* Low byte of PVR is the core revision; MBDD only for rev >= 5.0 */
   42        mfspr   r0,PVR
   43        andi.   r0,r0,0xff
   44        cmpwi   r0,0x50@l       /* if we are rev 5.0 or greater set MBDD */
   45        blt 1f
   46        /* Set MBDD bit also */
   47        ori r3, r3, HID1_MBDD@l
   481:
   49        mtspr   SPRN_HID1,r3
   50#endif
   51
   52#ifdef CONFIG_SYS_FSL_ERRATUM_CPU_A003999
        /* A-003999 workaround: set a bit in debug control register HDBCR1 */
   53        mfspr   r3,SPRN_HDBCR1
   54        oris    r3,r3,0x0100
   55        mtspr   SPRN_HDBCR1,r3
   56#endif
   57
   58#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
        /* Apply A-004510 only on the SVR revision(s) listed in config */
   59        mfspr   r3,SPRN_SVR
   60        rlwinm  r3,r3,0,0xff
   61        li      r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV
   62        cmpw    r3,r4
   63        beq     1f
   64
   65#ifdef CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
   66        li      r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
   67        cmpw    r3,r4
   68        beq     1f
   69#endif
   70
   71        /* Not a supported revision affected by erratum */
   72        b       2f
   73
   741:      /* Erratum says set bits 55:60 to 001001 */
   75        msync
   76        isync
   77        mfspr   r3,SPRN_HDBCR0
        /* rlwimi inserts 0x48 into the 0x1f8 mask = bits 55:60 of SPR 976 */
   78        li      r4,0x48
   79        rlwimi  r3,r4,0,0x1f8
   80        mtspr   SPRN_HDBCR0,r3
   81        isync
   822:
   83#endif
   84
   85        /* Enable branch prediction */
   86        lis     r3,BUCSR_ENABLE@h
   87        ori     r3,r3,BUCSR_ENABLE@l
   88        mtspr   SPRN_BUCSR,r3
   89
   90        /* Ensure TB is 0 */
   91        li      r3,0
   92        mttbl   r3
   93        mttbu   r3
   94
   95        /* Enable/invalidate the I-Cache */
   96        lis     r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
   97        ori     r2,r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
   98        mtspr   SPRN_L1CSR1,r2
        /* Spin until the flash-invalidate/lock-flash bits read back clear */
   991:
  100        mfspr   r3,SPRN_L1CSR1
  101        and.    r1,r3,r2
  102        bne     1b
  103
  104        lis     r3,(L1CSR1_CPE|L1CSR1_ICE)@h
  105        ori     r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
  106        mtspr   SPRN_L1CSR1,r3
  107        isync
        /* Spin until the I-cache enable bit reads back set */
  1082:
  109        mfspr   r3,SPRN_L1CSR1
  110        andi.   r1,r3,L1CSR1_ICE@l
  111        beq     2b
  112
  113        /* Enable/invalidate the D-Cache */
  114        lis     r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@h
  115        ori     r2,r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@l
  116        mtspr   SPRN_L1CSR0,r2
        /* Spin until the flash-invalidate/lock-flash bits read back clear */
  1171:
  118        mfspr   r3,SPRN_L1CSR0
  119        and.    r1,r3,r2
  120        bne     1b
  121
  122        lis     r3,(L1CSR0_CPE|L1CSR0_DCE)@h
  123        ori     r3,r3,(L1CSR0_CPE|L1CSR0_DCE)@l
  124        mtspr   SPRN_L1CSR0,r3
  125        isync
        /* Spin until the D-cache enable bit reads back set */
  1262:
  127        mfspr   r3,SPRN_L1CSR0
  128        andi.   r1,r3,L1CSR0_DCE@l
  129        beq     2b
 130
/*
 * toreset(x): translate a link-time symbol address inside this boot page
 * into the address the secondary cores actually execute it at, i.e. the
 * 4KB page at 0xfffff000 that BPTR points into.
 */
  131#define toreset(x) (x - __secondary_start_page + 0xfffff000)
  132
  133        /* get our PIR to figure out our table entry */
  134        lis     r3,toreset(__spin_table_addr)@h
  135        ori     r3,r3,toreset(__spin_table_addr)@l
  136        lwz     r3,0(r3)
  137
        /*
         * Compute r5 = this core's spin-table index and r4 = the PIR value
         * to (re)program, from the power-on PIR in r0.
         */
  138        mfspr   r0,SPRN_PIR
  139#ifdef CONFIG_SYS_FSL_QORIQ_CHASSIS2
  140/*
  141 * PIR definition for Chassis 2
  142 * 0-17 Reserved (logic 0s)
  143 * 18-19 CHIP_ID,    2'b00      - SoC 1
  144 *                  all others - reserved
  145 * 20-24 CLUSTER_ID 5'b00000   - CCM 1
  146 *                  all others - reserved
  147 * 25-26 CORE_CLUSTER_ID 2'b00 - cluster 1
  148 *                       2'b01 - cluster 2
  149 *                       2'b10 - cluster 3
  150 *                       2'b11 - cluster 4
  151 * 27-28 CORE_ID         2'b00 - core 0
  152 *                       2'b01 - core 1
  153 *                       2'b10 - core 2
  154 *                       2'b11 - core 3
  155 * 29-31 THREAD_ID       3'b000 - thread 0
  156 *                       3'b001 - thread 1
  157 *
  158 * Power-on PIR increments threads by 0x01, cores within a cluster by 0x08
  159 * and clusters by 0x20.
  160 *
  161 * We renumber PIR so that all threads in the system are consecutive.
  162 */
  163
  164        rlwinm  r8,r0,29,0x03   /* r8 = core within cluster */
  165        srwi    r10,r0,5        /* r10 = cluster */
  166
  167        mulli   r5,r10,CONFIG_SYS_FSL_CORES_PER_CLUSTER
  168        add     r5,r5,r8        /* for spin table index */
  169        mulli   r4,r5,CONFIG_SYS_FSL_THREADS_PER_CORE   /* for PIR */
  170#elif   defined(CONFIG_E500MC)
        /* e500mc: extract the low 5 bits shifted down; index == new PIR */
  171        rlwinm  r4,r0,27,27,31
  172        mr      r5,r4
  173#else
        /* e500v1/v2: use the power-on PIR unchanged */
  174        mr      r4,r0
  175        mr      r5,r4
  176#endif
  177
  178        /*
  179         * r10 has the base address for the entry.
  180         * we cannot access it yet before setting up a new TLB
  181         */
  182        slwi    r8,r5,6 /* spin table is padded to 64 byte */
  183        add     r10,r3,r8
  184
  185        mtspr   SPRN_PIR,r4     /* write to PIR register */
  186
  187#ifdef CONFIG_SYS_CACHE_STASHING
  188        /* set stash id to (coreID) * 2 + 32 + L1 CT (0) */
  189        slwi    r8,r4,1
  190        addi    r8,r8,32
  191        mtspr   L1CSR2,r8
  192#endif
 193
  194#if defined(CONFIG_SYS_P4080_ERRATUM_CPU22) || \
  195        defined(CONFIG_SYS_FSL_ERRATUM_NMG_CPU_A011)
  196        /*
  197         * CPU22 applies to P4080 rev 1.0, 2.0, fixed in 3.0
  198         * NMG_CPU_A011 applies to P4080 rev 1.0, 2.0, fixed in 3.0
  199         * also applies to P3041 rev 1.0, 1.1, P2041 rev 1.0, 1.1
         *
         * Workaround: enable D-cache write shadow (L1CSR2[DCWS]).
  200         */
  201        mfspr   r3,SPRN_SVR
  202        rlwinm  r6,r3,24,~0x800         /* clear E bit */
  203
        /* On P4080 rev >= 3.0 the erratum is fixed: skip the workaround */
  204        lis     r5,SVR_P4080@h
  205        ori     r5,r5,SVR_P4080@l
  206        cmpw    r6,r5
  207        bne     1f
  208
  209        rlwinm  r3,r3,0,0xf0
  210        li      r5,0x30
  211        cmpw    r3,r5
  212        bge     2f
  2131:
  214#ifdef  CONFIG_SYS_FSL_ERRATUM_NMG_CPU_A011
        /*
         * enable_cpu_a011_workaround lives in this boot page and is set by
         * cpu_init_r() from hwconfig; honor the runtime opt-out.
         */
  215        lis     r3,toreset(enable_cpu_a011_workaround)@ha
  216        lwz     r3,toreset(enable_cpu_a011_workaround)@l(r3)
  217        cmpwi   r3,0
  218        beq     2f
  219#endif
  220        mfspr   r3,L1CSR2
  221        oris    r3,r3,(L1CSR2_DCWS)@h
  222        mtspr   L1CSR2,r3
  2232:
  224#endif
  225
  226#ifdef CONFIG_SYS_FSL_ERRATUM_A005812
  227        /*
  228         * A-005812 workaround sets bit 32 of SPR 976 for SoCs running in
  229         * write shadow mode. This code should run after other code setting
  230         * DCWS.
  231         */
  232        mfspr   r3,L1CSR2
  233        andis.  r3,r3,(L1CSR2_DCWS)@h
  234        beq     1f
  235        mfspr   r3, SPRN_HDBCR0
  236        oris    r3, r3, 0x8000
  237        mtspr   SPRN_HDBCR0, r3
  2381:
  239#endif
 240
  241#ifdef CONFIG_BACKSIDE_L2_CACHE
  242        /* skip L2 setup on P2040/P2040E as they have no L2 */
  243        mfspr   r3,SPRN_SVR
  244        rlwinm  r6,r3,24,~0x800         /* clear E bit of SVR */
  245
  246        lis     r3,SVR_P2040@h
  247        ori     r3,r3,SVR_P2040@l
  248        cmpw    r6,r3
  249        beq 3f
  250
  251        /* Enable/invalidate the L2 cache */
  252        msync
  253        lis     r2,(L2CSR0_L2FI|L2CSR0_L2LFC)@h
  254        ori     r2,r2,(L2CSR0_L2FI|L2CSR0_L2LFC)@l
  255        mtspr   SPRN_L2CSR0,r2
        /* Spin until the flash-invalidate/lock-flash-clear bits self-clear */
  2561:
  257        mfspr   r3,SPRN_L2CSR0
  258        and.    r1,r3,r2
  259        bne     1b
  260
  261#ifdef CONFIG_SYS_CACHE_STASHING
  262        /* set stash id to (coreID) * 2 + 32 + L2 (1) */
        /* r8 still holds (coreID * 2 + 32) from the L1 stash setup above */
  263        addi    r3,r8,1
  264        mtspr   SPRN_L2CSR1,r3
  265#endif
  266
  267        lis     r3,CONFIG_SYS_INIT_L2CSR0@h
  268        ori     r3,r3,CONFIG_SYS_INIT_L2CSR0@l
  269        mtspr   SPRN_L2CSR0,r3
  270        isync
        /* Spin until the L2 enable bit reads back set */
  2712:
  272        mfspr   r3,SPRN_L2CSR0
  273        andis.  r1,r3,L2CSR0_L2E@h
  274        beq     2b
  275#endif
  2763:
  277        /* setup mapping for the spin table, WIMGE=0b00100 */
  278        lis     r13,toreset(__spin_table_addr)@h
  279        ori     r13,r13,toreset(__spin_table_addr)@l
  280        lwz     r13,0(r13)
  281        /* mask by 4K */
  282        rlwinm  r13,r13,0,0,19
  283
        /* TLB1 entry 1: 4K, AS=1 (MAS1[TS]) mapping of the spin table page */
  284        lis     r11,(MAS0_TLBSEL(1)|MAS0_ESEL(1))@h
  285        mtspr   SPRN_MAS0,r11
  286        lis     r11,(MAS1_VALID|MAS1_IPROT)@h
  287        ori     r11,r11,(MAS1_TS|MAS1_TSIZE(BOOKE_PAGESZ_4K))@l
  288        mtspr   SPRN_MAS1,r11
        /*
         * NOTE(review): each ori below sources r13 again, discarding the
         * result of the preceding oris. This is harmless only while the
         * MAS2/MAS3 flag bits used here live entirely in the low 16 bits
         * (so the @h halves are zero) -- confirm against the MAS definitions
         * before adding high-half flags.
         */
  289        oris    r11,r13,(MAS2_M|MAS2_G)@h
  290        ori     r11,r13,(MAS2_M|MAS2_G)@l
  291        mtspr   SPRN_MAS2,r11
  292        oris    r11,r13,(MAS3_SX|MAS3_SW|MAS3_SR)@h
  293        ori     r11,r13,(MAS3_SX|MAS3_SW|MAS3_SR)@l
  294        mtspr   SPRN_MAS3,r11
  295        li      r11,0
  296        mtspr   SPRN_MAS7,r11
  297        tlbwe
  298
  299        /*
  300         * __bootpg_addr has the address of __second_half_boot_page
  301         * jump there in AS=1 space with cache enabled
  302         */
  303        lis     r13,toreset(__bootpg_addr)@h
  304        ori     r13,r13,toreset(__bootpg_addr)@l
  305        lwz     r11,0(r13)
  306        mtspr   SPRN_SRR0,r11
        /* r13 = current MSR; r12 = MSR with IS|DS set so rfi lands in AS=1 */
  307        mfmsr   r13
  308        ori     r12,r13,MSR_IS|MSR_DS@l
  309        mtspr   SPRN_SRR1,r12
  310        rfi
 311
  312        /*
  313         * Allocate some space for the SDRAM address of the bootpg.
  314         * This variable has to be in the boot page so that it can
  315         * be accessed by secondary cores when they come out of reset.
  316         */
  317        .align L1_CACHE_SHIFT
  318        .globl __bootpg_addr
  319__bootpg_addr:
  320        .long   0
  321
        /* SDRAM address of the spin table; also patched in by the primary */
  322        .global __spin_table_addr
  323__spin_table_addr:
  324        .long   0
  325
  326        /*
  327         * This variable is set by cpu_init_r() after parsing hwconfig
  328         * to enable workaround for erratum NMG_CPU_A011.
  329         */
  330        .align L1_CACHE_SHIFT
  331        .global enable_cpu_a011_workaround
  332enable_cpu_a011_workaround:
  333        .long   1
  334
  335        /* Fill in the empty space.  The actual reset vector is
  336         * the last word of the page */
        /* pad to 4092 bytes; the 4-byte branch below completes the 4K page */
  337__secondary_start_code_end:
  338        .space 4092 - (__secondary_start_code_end - __secondary_start_page)
  339__secondary_reset_vector:
  340        b       __secondary_start_page
 341
 342
  343/* this is a separated page for the spin table and cacheable boot code */
  344        .align L1_CACHE_SHIFT
  345        .global __second_half_boot_page
  346__second_half_boot_page:
  347#ifdef CONFIG_PPC_SPINTABLE_COMPATIBLE
        /* r14 = spin_table_compat flag, read via offset from this page base */
  348        lis     r3,(spin_table_compat - __second_half_boot_page)@h
  349        ori     r3,r3,(spin_table_compat - __second_half_boot_page)@l
  350        add     r3,r3,r11 /* r11 has the address of __second_half_boot_page */
  351        lwz     r14,0(r3)
  352#endif
  353
        /* Byte offsets of the spin-table entry fields (struct below) */
  354#define ENTRY_ADDR_UPPER        0
  355#define ENTRY_ADDR_LOWER        4
  356#define ENTRY_R3_UPPER          8
  357#define ENTRY_R3_LOWER          12
  358#define ENTRY_RESV              16
  359#define ENTRY_PIR               20
  360#define ENTRY_SIZE              64
  361        /*
  362         * setup the entry
  363         * r10 has the base address of the spin table.
  364         * spin table is defined as
  365         * struct {
  366         *      uint64_t entry_addr;
  367         *      uint64_t r3;
  368         *      uint32_t rsvd1;
  369         *      uint32_t pir;
  370         * };
  371         * we pad this struct to 64 bytes so each entry is in its own cacheline
  372         */
  373        li      r3,0
  374        li      r8,1
  375        mfspr   r4,SPRN_PIR
  376        stw     r3,ENTRY_ADDR_UPPER(r10)
  377        stw     r3,ENTRY_R3_UPPER(r10)
  378        stw     r4,ENTRY_R3_LOWER(r10)
  379        stw     r3,ENTRY_RESV(r10)
  380        stw     r4,ENTRY_PIR(r10)
        /* msync orders the field stores before publishing addr_lower = 1 */
  381        msync
  382        stw     r8,ENTRY_ADDR_LOWER(r10)
  383
  384        /* spin waiting for addr */
  3853:
  386/*
  387 * To comply with ePAPR 1.1, the spin table has been moved to cache-enabled
  388 * memory. Old OS may not work with this change. A patch is waiting to be
  389 * accepted for Linux kernel. Other OS needs similar fix to spin table.
  390 * For OSes with old spin table code, we can enable this temporary fix by
  391 * setting environmental variable "spin_table_compat". For new OSes, set
  392 * "spin_table_compat=no". After Linux is fixed, we can remove this macro
  393 * and related code. For now, it is enabled by default.
  394 */
  395#ifdef CONFIG_PPC_SPINTABLE_COMPATIBLE
        /* compat mode: flush our cacheline each poll so an OS writing the
         * entry with caches disabled is still observed */
  396        cmpwi   r14,0
  397        beq     4f
  398        dcbf    0, r10
  399        sync
  4004:
  401#endif
        /* Spin while entry_addr low bit is still 1 (not yet released) */
  402        lwz     r4,ENTRY_ADDR_LOWER(r10)
  403        andi.   r11,r4,1
  404        bne     3b
  405        isync
  406
  407        /* get the upper bits of the addr */
  408        lwz     r11,ENTRY_ADDR_UPPER(r10)
  409
  410        /* setup branch addr */
  411        mtspr   SPRN_SRR0,r4
  412
  413        /* mark the entry as released */
  414        li      r8,3
  415        stw     r8,ENTRY_ADDR_LOWER(r10)
  416
  417        /* mask by ~64M to setup our tlb we will jump to */
  418        rlwinm  r12,r4,0,0,5
  419
  420        /*
  421         * setup r3, r4, r5, r6, r7, r8, r9
  422         * r3 contains the value to put in the r3 register at secondary cpu
  423         * entry. The high 32-bits are ignored on 32-bit chip implementations.
  424         * 64-bit chip implementations however shall load all 64-bits
  425         */
  426#ifdef CONFIG_SYS_PPC64
        /* ld picks up the full 64-bit r3 field starting at offset 8 */
  427        ld      r3,ENTRY_R3_UPPER(r10)
  428#else
  429        lwz     r3,ENTRY_R3_LOWER(r10)
  430#endif
  431        li      r4,0
  432        li      r5,0
  433        li      r6,0
        /* r7 = 64MB -- presumably the size of the mapped region handed to
         * the OS entry point; confirm against the ePAPR boot requirements */
  434        lis     r7,(64*1024*1024)@h
  435        li      r8,0
  436        li      r9,0
  437
  438        /* load up the pir */
        /* write PIR, then read it back and store what actually took effect */
  439        lwz     r0,ENTRY_PIR(r10)
  440        mtspr   SPRN_PIR,r0
  441        mfspr   r0,SPRN_PIR
  442        stw     r0,ENTRY_PIR(r10)
  443
        /* interrupt vectors relocate to the base of the 64M target region */
  444        mtspr   IVPR,r12
  445/*
  446 * Coming here, we know the cpu has one TLB mapping in TLB1[0]
  447 * which maps 0xfffff000-0xffffffff one-to-one.  We set up a
  448 * second mapping that maps addr 1:1 for 64M, and then we jump to
  449 * addr
  450 */
  451        lis     r10,(MAS0_TLBSEL(1)|MAS0_ESEL(0))@h
  452        mtspr   SPRN_MAS0,r10
  453        lis     r10,(MAS1_VALID|MAS1_IPROT)@h
  454        ori     r10,r10,(MAS1_TSIZE(BOOKE_PAGESZ_64M))@l
  455        mtspr   SPRN_MAS1,r10
  456        /* WIMGE = 0b00000 for now */
  457        mtspr   SPRN_MAS2,r12
  458        ori     r12,r12,(MAS3_SX|MAS3_SW|MAS3_SR)
  459        mtspr   SPRN_MAS3,r12
  460#ifdef CONFIG_ENABLE_36BIT_PHYS
        /* r11 = entry_addr upper word, used as the physical high bits */
  461        mtspr   SPRN_MAS7,r11
  462#endif
  463        tlbwe
  464
  465/* Now we have another mapping for this page, so we jump to that
  466 * mapping
  467 */
        /* NOTE(review): r13 appears to still hold the MSR captured before
         * the first rfi (AS=0), so the OS entry runs in address space 0 --
         * confirm no intervening code clobbers r13 */
  468        mtspr   SPRN_SRR1,r13
  469        rfi
 470
 471
        /* The spin table proper: one 64-byte entry per CPU, 64-byte aligned */
  472        .align 6
  473        .globl __spin_table
  474__spin_table:
  475        .space CONFIG_MAX_CPUS*ENTRY_SIZE
  476
  477#ifdef CONFIG_PPC_SPINTABLE_COMPATIBLE
        /* Runtime flag (default on) read into r14 by the boot code above;
         * cleared when the "spin_table_compat=no" env setting is parsed */
  478        .align L1_CACHE_SHIFT
  479        .global spin_table_compat
spin_table_compat:
  481        .long   1
  482
  483#endif
  484
        /* Pad the spin-table area out to a full 4K page */
  485__spin_table_end:
  486        .space 4096 - (__spin_table_end - __spin_table)
 487