linux/arch/powerpc/kernel/cpu_setup_6xx.S
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains low-level CPU setup functions.
 *    Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 */

#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/mmu.h>
#include <asm/feature-fixups.h>

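/* The __setup_cpu_* entry points below may be called before the MMU is
 * up.  By the convention used here, r4 points to the CPU's cpu_spec
 * (see the CPU_SPEC_FEATURES accesses below) and r3 carries an address
 * offset used while running untranslated (see __init_fpu_registers).
 * LR is saved in r5 around the bl calls since there is no stack frame
 * to save it in yet.
 */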
_GLOBAL(__setup_cpu_603)
        mflr    r5
BEGIN_MMU_FTR_SECTION
        li      r10,0
        mtspr   SPRN_SPRG_603_LRU,r10           /* init SW LRU tracking */
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)

BEGIN_FTR_SECTION
        bl      __init_fpu_registers
END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE)
        bl      setup_common_caches
        mtlr    r5
        blr
_GLOBAL(__setup_cpu_604)
        mflr    r5
        bl      setup_common_caches
        bl      setup_604_hid0
        mtlr    r5
        blr
_GLOBAL(__setup_cpu_750)
        mflr    r5
        bl      __init_fpu_registers
        bl      setup_common_caches
        bl      setup_750_7400_hid0
        mtlr    r5
        blr
_GLOBAL(__setup_cpu_750cx)
        mflr    r5
        bl      __init_fpu_registers
        bl      setup_common_caches
        bl      setup_750_7400_hid0
        bl      setup_750cx
        mtlr    r5
        blr
_GLOBAL(__setup_cpu_750fx)
        mflr    r5
        bl      __init_fpu_registers
        bl      setup_common_caches
        bl      setup_750_7400_hid0
        bl      setup_750fx
        mtlr    r5
        blr
_GLOBAL(__setup_cpu_7400)
        mflr    r5
        bl      __init_fpu_registers
        bl      setup_7400_workarounds
        bl      setup_common_caches
        bl      setup_750_7400_hid0
        mtlr    r5
        blr
_GLOBAL(__setup_cpu_7410)
        mflr    r5
        bl      __init_fpu_registers
        bl      setup_7410_workarounds
        bl      setup_common_caches
        bl      setup_750_7400_hid0
        li      r3,0
        mtspr   SPRN_L2CR2,r3
        mtlr    r5
        blr
_GLOBAL(__setup_cpu_745x)
        mflr    r5
        bl      setup_common_caches
        bl      setup_745x_specifics
        mtlr    r5
        blr

/* Enable caches for 603s, 604, 750 & 7400 */
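/* The I- and D-caches are enabled and flash-invalidated through the
 * HID0 ICFI/DCI bits: HID0 is first written with the invalidate bits
 * set, then rewritten with only the enable bits.  If the D-cache was
 * already enabled (HID0_DCE set), the D-cache invalidate is skipped so
 * we don't throw away data the firmware may have left cached.
 */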
setup_common_caches:
        mfspr   r11,SPRN_HID0
        andi.   r0,r11,HID0_DCE
        ori     r11,r11,HID0_ICE|HID0_DCE
        ori     r8,r11,HID0_ICFI
        bne     1f                      /* don't invalidate the D-cache */
        ori     r8,r8,HID0_DCI          /* unless it wasn't enabled */
1:      sync
        mtspr   SPRN_HID0,r8            /* enable and invalidate caches */
        sync
        mtspr   SPRN_HID0,r11           /* enable caches */
        sync
        isync
        blr

/* 604, 604e, 604ev, ...
 * Enable superscalar execution & branch history table
 */
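/* As with the caches above, HID0 is written twice: first with
 * HID0_BTCD set, which flushes the branch target address cache on
 * 604e/604r, then with the final value (BTCD cleared again).
 */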
setup_604_hid0:
        mfspr   r11,SPRN_HID0
        ori     r11,r11,HID0_SIED|HID0_BHTE
        ori     r8,r11,HID0_BTCD
        sync
        mtspr   SPRN_HID0,r8    /* flush branch target address cache */
        sync                    /* on 604e/604r */
        mtspr   SPRN_HID0,r11
        sync
        isync
        blr

/* 7400 rev 2.7 and earlier and 7410 rev 1.0 suffer from some
 * errata we work around here.
 * Moto MPC710CE.pdf describes them; they are errata
 * #3, #4 and #5.
 * Note that we assume the firmware didn't choose to
 * apply other workarounds (there are other ones documented
 * in the .pdf).  It appears that Apple firmware only works
 * around #3, with the same fix we use.  We may want to
 * check if the CPU is using 60x bus mode, in which case
 * the workaround for erratum #4 is useless.  Also, we may
 * want to explicitly clear HID0_NOPDST as this is not
 * needed once we have applied workaround #5 (though it's
 * not set by Apple's firmware at least).
 */
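/* Both revision checks below mask the PVR down to its low 12 bits (the
 * processor revision): the 7400 path applies the fixes for revisions
 * <= 0x0207 (2.7), the 7410 path only for revision 0x0100 (1.0).  Both
 * fall through to the shared MSSSR0 update at label 1.
 */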
setup_7400_workarounds:
        mfpvr   r3
        rlwinm  r3,r3,0,20,31
        cmpwi   0,r3,0x0207
        ble     1f
        blr
setup_7410_workarounds:
        mfpvr   r3
        rlwinm  r3,r3,0,20,31
        cmpwi   0,r3,0x0100
        bnelr
1:
        mfspr   r11,SPRN_MSSSR0
        /* Errata #3: Set L1OPQ_SIZE to 0x10 */
        rlwinm  r11,r11,0,9,6
        oris    r11,r11,0x0100
        /* Errata #4: Set L2MQ_SIZE to 1 (check for MPX mode first ?) */
        oris    r11,r11,0x0002
        /* Errata #5: Set DRLT_SIZE to 0x01 */
        rlwinm  r11,r11,0,5,2
        oris    r11,r11,0x0800
        sync
        mtspr   SPRN_MSSSR0,r11
        sync
        isync
        blr

/* 740/750/7400/7410
 * Enable Store Gathering (SGE), Address Broadcast (ABE),
 * Branch History Table (BHTE), Branch Target ICache (BTIC)
 * Dynamic Power Management (DPM), Speculative (SPD)
 * Clear Instruction cache throttling (ICTC)
 */
setup_750_7400_hid0:
        mfspr   r11,SPRN_HID0
        ori     r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC
        oris    r11,r11,HID0_DPM@h
BEGIN_FTR_SECTION
        xori    r11,r11,HID0_BTIC
END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC)
BEGIN_FTR_SECTION
        xoris   r11,r11,HID0_DPM@h      /* disable dynamic power mgmt */
END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)
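        /* The xori/xoris above rely on HID0_BTIC and HID0_DPM having
         * just been set unconditionally: when CPU_FTR_NO_BTIC or
         * CPU_FTR_NO_DPM is set, the feature fixup keeps the xor and it
         * clears the bit back out; otherwise the instruction is patched
         * into a nop.
         */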
        li      r3,HID0_SPD
        andc    r11,r11,r3              /* clear SPD: enable speculative */
        li      r3,0
        mtspr   SPRN_ICTC,r3            /* Instruction Cache Throttling off */
        isync
        mtspr   SPRN_HID0,r11
        sync
        isync
        blr

/* 750cx specific
 * Looks like we have to disable the NAP feature for some PLL settings...
 * (waiting for confirmation)
 */
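/* The rlwinm below rotates HID1 left by 4 and keeps the low 4 bits,
 * i.e. it extracts the PLL configuration from the top nibble.  The two
 * cror instructions fold the three comparisons into cr0 so a single
 * bnelr can return unless the PLL setting is 7, 9 or 11; for those we
 * clear CPU_FTR_CAN_NAP in the cpu_spec feature word (r4).
 */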
setup_750cx:
        mfspr   r10, SPRN_HID1
        rlwinm  r10,r10,4,28,31
        cmpwi   cr0,r10,7
        cmpwi   cr1,r10,9
        cmpwi   cr2,r10,11
        cror    4*cr0+eq,4*cr0+eq,4*cr1+eq
        cror    4*cr0+eq,4*cr0+eq,4*cr2+eq
        bnelr
        lwz     r6,CPU_SPEC_FEATURES(r4)
        li      r7,CPU_FTR_CAN_NAP
        andc    r6,r6,r7
        stw     r6,CPU_SPEC_FEATURES(r4)
        blr

/* 750fx specific
 */
setup_750fx:
        blr

/* MPC 745x
 * Enable Store Gathering (SGE), Branch Folding (FOLD)
 * Branch History Table (BHTE), Branch Target ICache (BTIC)
 * Dynamic Power Management (DPM), Speculative (SPD)
 * Ensure our data cache instructions really operate.
 * Timebase has to be running or we wouldn't have made it here,
 * just ensure we don't disable it.
 * Clear Instruction cache throttling (ICTC)
 * Enable L2 HW prefetch
 */
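/* The flow below: first check whether the firmware enabled an L3 cache
 * and, if this CPU is flagged CPU_FTR_L3_DISABLE_NAP, clear
 * CPU_FTR_CAN_NAP in the cpu_spec (r4); then set/clear the HID0 bits
 * listed above, turn off instruction cache throttling, and finally
 * enable L2 HW prefetch in MSSCR0 if the L2 is enabled.
 */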
setup_745x_specifics:
        /* We check for the presence of an L3 cache set up by
         * the firmware. If there is one, we disable NAP capability as
         * it's known to be bogus on rev 2.1 and earlier
         */
BEGIN_FTR_SECTION
        mfspr   r11,SPRN_L3CR
        andis.  r11,r11,L3CR_L3E@h
        beq     1f
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
        lwz     r6,CPU_SPEC_FEATURES(r4)
        andis.  r0,r6,CPU_FTR_L3_DISABLE_NAP@h
        beq     1f
        li      r7,CPU_FTR_CAN_NAP
        andc    r6,r6,r7
        stw     r6,CPU_SPEC_FEATURES(r4)
1:
        mfspr   r11,SPRN_HID0

        /* All of the bits we have to set.....
         */
        ori     r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE
        ori     r11,r11,HID0_LRSTK | HID0_BTIC
        oris    r11,r11,HID0_DPM@h
BEGIN_MMU_FTR_SECTION
        oris    r11,r11,HID0_HIGH_BAT@h
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
BEGIN_FTR_SECTION
        xori    r11,r11,HID0_BTIC
END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC)
BEGIN_FTR_SECTION
        xoris   r11,r11,HID0_DPM@h      /* disable dynamic power mgmt */
END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM)

        /* All of the bits we have to clear....
         */
        li      r3,HID0_SPD | HID0_NOPDST | HID0_NOPTI
        andc    r11,r11,r3              /* clear SPD: enable speculative */
        li      r3,0

        mtspr   SPRN_ICTC,r3            /* Instruction Cache Throttling off */
        isync
        mtspr   SPRN_HID0,r11
        sync
        isync

        /* Enable L2 HW prefetch, if L2 is enabled
         */
        mfspr   r3,SPRN_L2CR
        andis.  r3,r3,L2CR_L2E@h
        beqlr
        mfspr   r3,SPRN_MSSCR0
        ori     r3,r3,3
        sync
        mtspr   SPRN_MSSCR0,r3
        sync
        isync
        blr

/*
 * Initialize the FPU registers. This is needed to work around an erratum
 * in some 750 CPUs where using a not-yet-initialized FPU register after
 * power-on reset may hang the CPU.
 */
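/* Two details matter here: MSR_FP must be set (followed by an isync)
 * before the FPRs can be touched at all, and the 32 zero values are
 * loaded from empty_zero_page.  r3 holds the address offset passed in
 * by the caller, so the page is addressed correctly even before the
 * MMU is enabled.
 */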
_GLOBAL(__init_fpu_registers)
        mfmsr   r10
        ori     r11,r10,MSR_FP
        mtmsr   r11
        isync
        addis   r9,r3,empty_zero_page@ha
        addi    r9,r9,empty_zero_page@l
        REST_32FPRS(0,r9)
        sync
        mtmsr   r10
        isync
        blr


/* Definitions for the table used to save CPU state */
#define CS_HID0         0
#define CS_HID1         4
#define CS_HID2         8
#define CS_MSSCR0       12
#define CS_MSSSR0       16
#define CS_ICTRL        20
#define CS_LDSTCR       24
#define CS_LDSTDB       28
#define CS_SIZE         32
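/* Each CS_* constant is a byte offset into cpu_state_storage below;
 * every saved SPR occupies one 32-bit word, CS_SIZE bytes in total.
 */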

        .data
        .balign L1_CACHE_BYTES
cpu_state_storage:
        .space  CS_SIZE
        .balign L1_CACHE_BYTES,0
        .text

/* Called in normal context to back up CPU 0 state. This
 * does not include cache settings. This function is also
 * called for machine sleep. This does not include the MMU
 * setup, BATs, etc... but rather the "special" registers
 * like HID0, HID1, MSSCR0, etc...
 */
_GLOBAL(__save_cpu_setup)
        /* Some CR fields are volatile, so back them all up */
        mfcr    r7

        /* Get storage ptr */
        lis     r5,cpu_state_storage@h
        ori     r5,r5,cpu_state_storage@l

        /* Save HID0 (common to all CONFIG_PPC_BOOK3S_32 cpus) */
        mfspr   r3,SPRN_HID0
        stw     r3,CS_HID0(r5)

        /* Now deal with CPU type dependent registers */
        mfspr   r3,SPRN_PVR
        srwi    r3,r3,16
        cmplwi  cr0,r3,0x8000   /* 7450 */
        cmplwi  cr1,r3,0x000c   /* 7400 */
        cmplwi  cr2,r3,0x800c   /* 7410 */
        cmplwi  cr3,r3,0x8001   /* 7455 */
        cmplwi  cr4,r3,0x8002   /* 7457 */
        cmplwi  cr5,r3,0x8003   /* 7447A */
        cmplwi  cr6,r3,0x7000   /* 750FX */
        cmplwi  cr7,r3,0x8004   /* 7448 */
        /* cr1 is 7400 || 7410 */
        cror    4*cr1+eq,4*cr1+eq,4*cr2+eq
        /* cr0 is 74xx */
        cror    4*cr0+eq,4*cr0+eq,4*cr3+eq
        cror    4*cr0+eq,4*cr0+eq,4*cr4+eq
        cror    4*cr0+eq,4*cr0+eq,4*cr1+eq
        cror    4*cr0+eq,4*cr0+eq,4*cr5+eq
        cror    4*cr0+eq,4*cr0+eq,4*cr7+eq
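        /* After the cror folding above: cr0.eq means "any 74xx", cr1.eq
         * "7400 or 7410", cr2.eq "7410" and cr6.eq "750FX".  The srwi
         * kept the processor version from the top half of the PVR, so
         * each cmplwi tests for one model.
         */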
        bne     1f
        /* Back up 74xx specific regs */
        mfspr   r4,SPRN_MSSCR0
        stw     r4,CS_MSSCR0(r5)
        mfspr   r4,SPRN_MSSSR0
        stw     r4,CS_MSSSR0(r5)
        beq     cr1,1f
        /* Back up 745x specific registers */
        mfspr   r4,SPRN_HID1
        stw     r4,CS_HID1(r5)
        mfspr   r4,SPRN_ICTRL
        stw     r4,CS_ICTRL(r5)
        mfspr   r4,SPRN_LDSTCR
        stw     r4,CS_LDSTCR(r5)
        mfspr   r4,SPRN_LDSTDB
        stw     r4,CS_LDSTDB(r5)
1:
        bne     cr6,1f
        /* Back up 750FX specific registers */
        mfspr   r4,SPRN_HID1
        stw     r4,CS_HID1(r5)
        /* If rev 2.x, back up HID2 */
        mfspr   r3,SPRN_PVR
        andi.   r3,r3,0xff00
        cmpwi   cr0,r3,0x0200
        bne     1f
        mfspr   r4,SPRN_HID2
        stw     r4,CS_HID2(r5)
1:
        mtcr    r7
        blr

/* Called with no MMU context (typically MSR:IR/DR off) to
 * restore CPU state as backed up by the previous
 * function. This does not include cache settings.
 */
_GLOBAL(__restore_cpu_setup)
        /* Some CR fields are volatile, so back them all up */
        mfcr    r7

        /* Get storage ptr */
        lis     r5,(cpu_state_storage-KERNELBASE)@h
        ori     r5,r5,cpu_state_storage@l
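        /* The mixed lis/ori above is intentional: the high half uses
         * the physical address (KERNELBASE subtracted) since we run
         * untranslated here, while the low half comes from the virtual
         * symbol; both are identical because KERNELBASE is 64KB-aligned.
         */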

        /* Restore HID0 */
        lwz     r3,CS_HID0(r5)
        sync
        isync
        mtspr   SPRN_HID0,r3
        sync
        isync

        /* Now deal with CPU type dependent registers */
        mfspr   r3,SPRN_PVR
        srwi    r3,r3,16
        cmplwi  cr0,r3,0x8000   /* 7450 */
        cmplwi  cr1,r3,0x000c   /* 7400 */
        cmplwi  cr2,r3,0x800c   /* 7410 */
        cmplwi  cr3,r3,0x8001   /* 7455 */
        cmplwi  cr4,r3,0x8002   /* 7457 */
        cmplwi  cr5,r3,0x8003   /* 7447A */
        cmplwi  cr6,r3,0x7000   /* 750FX */
        cmplwi  cr7,r3,0x8004   /* 7448 */
        /* cr1 is 7400 || 7410 */
        cror    4*cr1+eq,4*cr1+eq,4*cr2+eq
        /* cr0 is 74xx */
        cror    4*cr0+eq,4*cr0+eq,4*cr3+eq
        cror    4*cr0+eq,4*cr0+eq,4*cr4+eq
        cror    4*cr0+eq,4*cr0+eq,4*cr1+eq
        cror    4*cr0+eq,4*cr0+eq,4*cr5+eq
        cror    4*cr0+eq,4*cr0+eq,4*cr7+eq
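        /* Same PVR dispatch as in __save_cpu_setup above: cr0.eq = any
         * 74xx, cr1.eq = 7400/7410, cr2.eq = 7410, cr6.eq = 750FX.
         */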
        bne     2f
        /* Restore 74xx specific regs */
        lwz     r4,CS_MSSCR0(r5)
        sync
        mtspr   SPRN_MSSCR0,r4
        sync
        isync
        lwz     r4,CS_MSSSR0(r5)
        sync
        mtspr   SPRN_MSSSR0,r4
        sync
        isync
        bne     cr2,1f
        /* Clear 7410 L2CR2 */
        li      r4,0
        mtspr   SPRN_L2CR2,r4
1:      beq     cr1,2f
        /* Restore 745x specific registers */
        lwz     r4,CS_HID1(r5)
        sync
        mtspr   SPRN_HID1,r4
        isync
        sync
        lwz     r4,CS_ICTRL(r5)
        sync
        mtspr   SPRN_ICTRL,r4
        isync
        sync
        lwz     r4,CS_LDSTCR(r5)
        sync
        mtspr   SPRN_LDSTCR,r4
        isync
        sync
        lwz     r4,CS_LDSTDB(r5)
        sync
        mtspr   SPRN_LDSTDB,r4
        isync
        sync
2:      bne     cr6,1f
        /* Restore 750FX specific registers:
         * restore HID2 on rev 2.x, and on all revisions restore the
         * PLL config, switching through PLL 0 first
         */
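        /* The PLL switch below: HID1 is first written with the PLL
         * select field cleared (PLL 0), then we spin on the timebase
         * for ~10000 ticks to let the PLL stabilize before writing the
         * saved HID1 value back.
         */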
        /* If rev 2.x, restore HID2 with low voltage bit cleared */
        mfspr   r3,SPRN_PVR
        andi.   r3,r3,0xff00
        cmpwi   cr0,r3,0x0200
        bne     4f
        lwz     r4,CS_HID2(r5)
        rlwinm  r4,r4,0,19,17
        mtspr   SPRN_HID2,r4
        sync
4:
        lwz     r4,CS_HID1(r5)
        rlwinm  r5,r4,0,16,14
        mtspr   SPRN_HID1,r5
        /* Wait for PLL to stabilize */
        mftbl   r5
3:      mftbl   r6
        sub     r6,r6,r5
        cmplwi  cr0,r6,10000
        ble     3b
        /* Setup final PLL */
        mtspr   SPRN_HID1,r4
1:
        mtcr    r7
        blr
