linux/arch/powerpc/kernel/cpu_setup_fsl_booke.S
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains low level CPU setup functions.
 * Kumar Gala <galak@kernel.crashing.org>
 * Copyright 2009 Freescale Semiconductor, Inc.
 *
 * Based on cpu_setup_6xx code by
 * Benjamin Herrenschmidt <benh@kernel.crashing.org>
 */

#include <asm/page.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/nohash/mmu-book3e.h>
#include <asm/asm-offsets.h>
#include <asm/mpc85xx.h>

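/*
 * Enable the L1 instruction cache.  If L1CSR1[ICE] is already set we
 * return immediately; otherwise we enable cache parity, flash-invalidate
 * the cache, reset its lock bits, and turn it on in a single write.
 */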
_GLOBAL(__e500_icache_setup)
        mfspr   r0, SPRN_L1CSR1
        andi.   r3, r0, L1CSR1_ICE
        bnelr                           /* Already enabled */
        oris    r0, r0, L1CSR1_CPE@h
        ori     r0, r0, (L1CSR1_ICFI | L1CSR1_ICLFR | L1CSR1_ICE)
        mtspr   SPRN_L1CSR1, r0         /* Enable I-Cache */
        isync
        blr

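/*
 * Enable the L1 data cache.  If L1CSR0[DCE] is already set we return
 * immediately; otherwise the cache is disabled, flash-invalidated and
 * its lock bits cleared (waiting for CLFC to self-clear), then enabled
 * with parity checking.
 */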
_GLOBAL(__e500_dcache_setup)
        mfspr   r0, SPRN_L1CSR0
        andi.   r3, r0, L1CSR0_DCE
        bnelr                           /* Already enabled */
        msync
        isync
        li      r0, 0
        mtspr   SPRN_L1CSR0, r0         /* Disable */
        msync
        isync
        li      r0, (L1CSR0_DCFI | L1CSR0_CLFC)
        mtspr   SPRN_L1CSR0, r0         /* Invalidate */
        isync
1:      mfspr   r0, SPRN_L1CSR0
        andi.   r3, r0, L1CSR0_CLFC
        bne+    1b                      /* Wait for lock bits reset */
        oris    r0, r0, L1CSR0_CPE@h
        ori     r0, r0, L1CSR0_DCE
        msync
        isync
        mtspr   SPRN_L1CSR0, r0         /* Enable */
        isync
        blr

/*
 * FIXME - we haven't yet done testing to determine a reasonable default
 * value for PW20_WAIT_IDLE_BIT.
 */
#define PW20_WAIT_IDLE_BIT              50 /* 1ms, TB frequency is 41.66MHz */
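/*
 * Enable the PW20 power-saving state: set PWRMGTCR0[PW20_WAIT] and
 * program the automatic core idle entry count from PW20_WAIT_IDLE_BIT
 * above.
 */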
_GLOBAL(setup_pw20_idle)
        mfspr   r3, SPRN_PWRMGTCR0

        /* Set PW20_WAIT bit, enable pw20 state */
        ori     r3, r3, PWRMGTCR0_PW20_WAIT
        li      r11, PW20_WAIT_IDLE_BIT

        /* Set Automatic PW20 Core Idle Count */
        rlwimi  r3, r11, PWRMGTCR0_PW20_ENT_SHIFT, PWRMGTCR0_PW20_ENT

        mtspr   SPRN_PWRMGTCR0, r3

        blr

/*
 * FIXME - we haven't yet done testing to determine a reasonable default
 * value for AV_WAIT_IDLE_BIT.
 */
#define AV_WAIT_IDLE_BIT                50 /* 1ms, TB frequency is 41.66MHz */
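/*
 * Enable AltiVec idle power-down: set PWRMGTCR0[AV_IDLE_PD_EN] and
 * program the automatic AltiVec idle count from AV_WAIT_IDLE_BIT above.
 */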
_GLOBAL(setup_altivec_idle)
        mfspr   r3, SPRN_PWRMGTCR0

        /* Enable Altivec Idle */
        oris    r3, r3, PWRMGTCR0_AV_IDLE_PD_EN@h
        li      r11, AV_WAIT_IDLE_BIT

        /* Set Automatic AltiVec Idle Count */
        rlwimi  r3, r11, PWRMGTCR0_AV_IDLE_CNT_SHIFT, PWRMGTCR0_AV_IDLE_CNT

        mtspr   SPRN_PWRMGTCR0, r3

        blr

#ifdef CONFIG_PPC_E500MC
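/*
 * e6500 setup: on 64-bit, install the AltiVec IVORs and, when the core
 * implements category E.HV (MMUCFG[LPIDSIZE] != 0), the LRAT IVOR.
 * Then configure the PW20 and AltiVec idle states and run the common
 * e5500 setup.
 */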
_GLOBAL(__setup_cpu_e6500)
        mflr    r6
#ifdef CONFIG_PPC64
        bl      setup_altivec_ivors
        /* Touch IVOR42 only if the CPU supports E.HV category */
        mfspr   r10,SPRN_MMUCFG
        rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
        beq     1f
        bl      setup_lrat_ivor
1:
#endif
        bl      setup_pw20_idle
        bl      setup_altivec_idle
        bl      __setup_cpu_e5500
        mtlr    r6
        blr
#endif /* CONFIG_PPC_E500MC */

#ifdef CONFIG_PPC32
#ifdef CONFIG_E200
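/* e200 setup: enable the Debug APU in HID0, then install the e200 IVORs. */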
_GLOBAL(__setup_cpu_e200)
        /* enable dedicated debug exception handling resources (Debug APU) */
        mfspr   r3,SPRN_HID0
        ori     r3,r3,HID0_DAPUEN@l
        mtspr   SPRN_HID0,r3
        b       __setup_e200_ivors
#endif /* CONFIG_E200 */

#ifdef CONFIG_E500
#ifndef CONFIG_PPC_E500MC
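/*
 * Common setup for e500v1/e500v2: initialize the L1 caches and install
 * the e500 IVORs.  When RapidIO or PCI support is built in, HID1[RFXE]
 * is also set so that read error responses from those buses are
 * reported as machine checks.
 */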
_GLOBAL(__setup_cpu_e500v1)
_GLOBAL(__setup_cpu_e500v2)
        mflr    r4
        bl      __e500_icache_setup
        bl      __e500_dcache_setup
        bl      __setup_e500_ivors
#if defined(CONFIG_FSL_RIO) || defined(CONFIG_FSL_PCI)
        /* Ensure that RFXE is set */
        mfspr   r3,SPRN_HID1
        oris    r3,r3,HID1_RFXE@h
        mtspr   SPRN_HID1,r3
#endif
        mtlr    r4
        blr
#else /* CONFIG_PPC_E500MC */
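/*
 * Common 32-bit setup for e500mc/e5500: initialize the L1 caches and
 * install the e500mc IVORs.  The hypervisor (E.HV) IVORs are only set
 * up when the hardware supports them; otherwise CPU_FTR_EMB_HV is
 * cleared from the cpu_spec feature word passed in r4.
 */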
_GLOBAL(__setup_cpu_e500mc)
_GLOBAL(__setup_cpu_e5500)
        mflr    r5
        bl      __e500_icache_setup
        bl      __e500_dcache_setup
        bl      __setup_e500mc_ivors
        /*
         * We only want to touch IVOR38-41 if we're running on hardware
         * that supports category E.HV.  The architectural way to determine
         * this is MMUCFG[LPIDSIZE].
         */
        mfspr   r3, SPRN_MMUCFG
        rlwinm. r3, r3, 0, MMUCFG_LPIDSIZE
        beq     1f
        bl      __setup_ehv_ivors
        b       2f
1:
        lwz     r3, CPU_SPEC_FEATURES(r4)
        /*
         * This check is needed because cpu_setup is also called for the
         * secondary cores: if the primary core has already cleared the
         * feature, avoid clearing and rewriting it here.
         */
        andi.   r6, r3, CPU_FTR_EMB_HV
        beq     2f
        rlwinm  r3, r3, 0, ~CPU_FTR_EMB_HV
        stw     r3, CPU_SPEC_FEATURES(r4)
2:
        mtlr    r5
        blr
#endif /* CONFIG_PPC_E500MC */
#endif /* CONFIG_E500 */
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC_BOOK3E_64
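/*
 * Restore variant of the e6500 setup: reinstall the AltiVec IVORs (and
 * the LRAT IVOR when E.HV is implemented), reprogram the PW20 and
 * AltiVec idle states, then run the e5500 restore sequence.
 */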
_GLOBAL(__restore_cpu_e6500)
        mflr    r5
        bl      setup_altivec_ivors
        /* Touch IVOR42 only if the CPU supports E.HV category */
        mfspr   r10,SPRN_MMUCFG
        rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
        beq     1f
        bl      setup_lrat_ivor
1:
        bl      setup_pw20_idle
        bl      setup_altivec_idle
        bl      __restore_cpu_e5500
        mtlr    r5
        blr

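/*
 * Restore variant of the e5500 setup: reinitialize the L1 caches and
 * reinstall the base, performance monitor, doorbell and (if E.HV is
 * implemented) hypervisor IVORs.  Unlike __setup_cpu_e5500 below, the
 * cpu_spec feature word is left untouched.
 */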
_GLOBAL(__restore_cpu_e5500)
        mflr    r4
        bl      __e500_icache_setup
        bl      __e500_dcache_setup
        bl      __setup_base_ivors
        bl      setup_perfmon_ivor
        bl      setup_doorbell_ivors
        /*
         * We only want to touch IVOR38-41 if we're running on hardware
         * that supports category E.HV.  The architectural way to determine
         * this is MMUCFG[LPIDSIZE].
         */
        mfspr   r10,SPRN_MMUCFG
        rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
        beq     1f
        bl      setup_ehv_ivors
1:
        mtlr    r4
        blr

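/*
 * 64-bit e5500/e6500 common setup: same as the restore path above, but
 * when the hardware does not implement E.HV the CPU_FTR_EMB_HV bit is
 * cleared from the cpu_spec feature word (r4) so the feature is no
 * longer advertised.
 */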
_GLOBAL(__setup_cpu_e5500)
        mflr    r5
        bl      __e500_icache_setup
        bl      __e500_dcache_setup
        bl      __setup_base_ivors
        bl      setup_perfmon_ivor
        bl      setup_doorbell_ivors
        /*
         * We only want to touch IVOR38-41 if we're running on hardware
         * that supports category E.HV.  The architectural way to determine
         * this is MMUCFG[LPIDSIZE].
         */
        mfspr   r10,SPRN_MMUCFG
        rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
        beq     1f
        bl      setup_ehv_ivors
        b       2f
1:
        ld      r10,CPU_SPEC_FEATURES(r4)
        LOAD_REG_IMMEDIATE(r9,CPU_FTR_EMB_HV)
        andc    r10,r10,r9
        std     r10,CPU_SPEC_FEATURES(r4)
2:
        mtlr    r5
        blr
#endif

/* Flush the L1 data cache; applies to e500v2, e500mc and e5500. */
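/*
 * The flush works by size: the cache geometry is read from L1CFG0,
 * HID0[DCFA] (data cache flush assist) is set for the duration so the
 * read pass reliably displaces every line, and a KERNELBASE-based
 * region large enough to cover the whole cache is first loaded and
 * then swept with dcbf, pushing modified data to memory.  Interrupts
 * are disabled (wrteei 0) while this runs and MSR[EE] is restored at
 * the end.
 */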
_GLOBAL(flush_dcache_L1)
        mfmsr   r10
        wrteei  0

        mfspr   r3,SPRN_L1CFG0
        rlwinm  r5,r3,9,3       /* Extract cache block size */
        twlgti  r5,1            /* Only 32 and 64 byte cache blocks
                                 * are currently defined.
                                 */
        li      r4,32
        subfic  r6,r5,2         /* r6 = log2(1KiB / cache block size) -
                                 *      log2(number of ways)
                                 */
        slw     r5,r4,r5        /* r5 = cache block size */

        rlwinm  r7,r3,0,0xff    /* Extract number of KiB in the cache */
        mulli   r7,r7,13        /* An 8-way cache will require 13
                                 * loads per set.
                                 */
        slw     r7,r7,r6

        /* save off HID0 and set DCFA */
        mfspr   r8,SPRN_HID0
        ori     r9,r8,HID0_DCFA@l
        mtspr   SPRN_HID0,r9
        isync

        LOAD_REG_IMMEDIATE(r6, KERNELBASE)
        mr      r4, r6
        mtctr   r7

1:      lwz     r3,0(r4)        /* Load... */
        add     r4,r4,r5
        bdnz    1b

        msync
        mr      r4, r6
        mtctr   r7

1:      dcbf    0,r4            /* ...and flush. */
        add     r4,r4,r5
        bdnz    1b

        /* restore HID0 */
        mtspr   SPRN_HID0,r8
        isync

        wrtee   r10

        blr

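/*
 * Return 1 in r3 if this part has a backside L2 cache, 0 otherwise.
 * Only the P2040/P2040E (identified by SVR with the E bit cleared)
 * are treated as having no L2.
 */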
has_L2_cache:
        /* skip L2 cache on P2040/P2040E as they have no L2 cache */
        mfspr   r3, SPRN_SVR
        /* shift right by 8 bits and clear E bit of SVR */
        rlwinm  r4, r3, 24, ~0x800

        lis     r3, SVR_P2040@h
        ori     r3, r3, SVR_P2040@l
        cmpw    r4, r3
        beq     1f

        li      r3, 1
        blr
1:
        li      r3, 0
        blr

/* flush backside L2 cache */
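/*
 * Skips parts without a backside L2 (see has_L2_cache); otherwise sets
 * L2CSR0[L2FL] and spins until the hardware clears the bit to signal
 * that the flush has completed.
 */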
flush_backside_L2_cache:
        mflr    r10
        bl      has_L2_cache
        mtlr    r10
        cmpwi   r3, 0
        beq     2f

        /* Flush the L2 cache */
        mfspr   r3, SPRN_L2CSR0
        ori     r3, r3, L2CSR0_L2FL@l
        msync
        isync
        mtspr   SPRN_L2CSR0,r3
        isync

        /* check if it is complete */
1:      mfspr   r3,SPRN_L2CSR0
        andi.   r3, r3, L2CSR0_L2FL@l
        bne     1b
2:
        blr

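/*
 * Per-CPU-type flush routines used when a core is taken down: each
 * writes back the L1 data cache and, where one exists, the backside
 * L2, so no modified data is left in caches private to the core.
 */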
_GLOBAL(cpu_down_flush_e500v2)
        mflr    r0
        bl      flush_dcache_L1
        mtlr    r0
        blr

_GLOBAL(cpu_down_flush_e500mc)
_GLOBAL(cpu_down_flush_e5500)
        mflr    r0
        bl      flush_dcache_L1
        bl      flush_backside_L2_cache
        mtlr    r0
        blr

/* The e6500 L1 data cache contains no modified data, so no flush is required */
_GLOBAL(cpu_down_flush_e6500)
        blr