/*
 * arch/sh/kernel/cpu/init.c
 *
 * CPU init code
 *
 * Copyright (C) 2002 - 2009  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/log2.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/cacheflush.h>
#include <asm/cache.h>
#include <asm/elf.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/sh_bios.h>

#ifdef CONFIG_SH_FPU
#define cpu_has_fpu     1
#else
#define cpu_has_fpu     0
#endif

#ifdef CONFIG_SH_DSP
#define cpu_has_dsp     1
#else
#define cpu_has_dsp     0
#endif

/*
 * Generic wrapper for command line arguments to disable on-chip
 * peripherals (nofpu, nodsp, and so forth).
 */
#define onchip_setup(x)                                 \
static int x##_disabled __cpuinitdata = !cpu_has_##x;   \
                                                        \
static int __cpuinit x##_setup(char *opts)              \
{                                                       \
        x##_disabled = 1;                               \
        return 1;                                       \
}                                                       \
__setup("no" __stringify(x), x##_setup);

onchip_setup(fpu);
onchip_setup(dsp);
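
/*
 * onchip_setup(fpu) and onchip_setup(dsp) above expand into an
 * fpu_disabled/dsp_disabled flag plus a __setup() handler, so booting
 * with "nofpu" or "nodsp" on the kernel command line simply sets the
 * corresponding flag before cpu_init() consults it below.
 */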

#ifdef CONFIG_SPECULATIVE_EXECUTION
#define CPUOPM          0xff2f0000
#define CPUOPM_RABD     (1 << 5)

static void __cpuinit speculative_execution_init(void)
{
        /* Clear RABD */
        __raw_writel(__raw_readl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);

        /* Flush the update */
        (void)__raw_readl(CPUOPM);
        ctrl_barrier();
}
#else
#define speculative_execution_init()    do { } while (0)
#endif

#ifdef CONFIG_CPU_SH4A
#define EXPMASK                 0xff2f0004
#define EXPMASK_RTEDS           (1 << 0)
#define EXPMASK_BRDSSLP         (1 << 1)
#define EXPMASK_MMCAW           (1 << 4)

static void __cpuinit expmask_init(void)
{
        unsigned long expmask = __raw_readl(EXPMASK);

        /*
         * Future proofing.
         *
         * Disable support for slottable sleep instruction, non-nop
         * instructions in the rte delay slot, and associative writes to
         * the memory-mapped cache array.
         */
        expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP | EXPMASK_MMCAW);

        __raw_writel(expmask, EXPMASK);
        ctrl_barrier();
}
#else
#define expmask_init()  do { } while (0)
#endif

/* 2nd-level cache init */
void __attribute__ ((weak)) l2_cache_init(void)
{
}

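/*
 * Note that l2_cache_init() above is a weak stub: parts that actually
 * have a second-level cache provide their own l2_cache_init(), which
 * overrides this empty default at link time.
 */
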
/*
 * Generic first-level cache init
 */
#ifdef CONFIG_SUPERH32
static void cache_init(void)
{
        unsigned long ccr, flags;

        jump_to_uncached();
        ccr = __raw_readl(CCR);

        /*
         * At this point we don't know whether the cache is enabled or not - a
         * bootloader may have enabled it.  There are at least 2 things that
         * could be dirty in the cache at this point:
         * 1. kernel command line set up by boot loader
         * 2. spilled registers from the prolog of this function
         * => before re-initialising the cache, we must do a purge of the whole
         * cache out to memory for safety.  As long as nothing is spilled
         * during the loop to lines that have already been done, this is safe.
         * - RPC
         */
        if (ccr & CCR_CACHE_ENABLE) {
                unsigned long ways, waysize, addrstart;

                waysize = current_cpu_data.dcache.sets;

#ifdef CCR_CACHE_ORA
                /*
                 * If the OC is already in RAM mode, we only have
                 * half of the entries to flush..
                 */
                if (ccr & CCR_CACHE_ORA)
                        waysize >>= 1;
#endif

                waysize <<= current_cpu_data.dcache.entry_shift;

#ifdef CCR_CACHE_EMODE
                /* If EMODE is not set, we only have 1 way to flush. */
                if (!(ccr & CCR_CACHE_EMODE))
                        ways = 1;
                else
#endif
                        ways = current_cpu_data.dcache.ways;

                addrstart = CACHE_OC_ADDRESS_ARRAY;
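                /*
                 * Writing 0 to each line's OC address array entry clears
                 * its valid and dirty bits; dirty lines are written back
                 * to memory first, which gives us the "purge out to
                 * memory" described above.
                 */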
                do {
                        unsigned long addr;

                        for (addr = addrstart;
                             addr < addrstart + waysize;
                             addr += current_cpu_data.dcache.linesz)
                                __raw_writel(0, addr);

                        addrstart += current_cpu_data.dcache.way_incr;
                } while (--ways);
        }

        /*
         * Default CCR values .. enable the caches
         * and invalidate them immediately..
         */
        flags = CCR_CACHE_ENABLE | CCR_CACHE_INVALIDATE;

#ifdef CCR_CACHE_EMODE
        /* Force EMODE if possible */
        if (current_cpu_data.dcache.ways > 1)
                flags |= CCR_CACHE_EMODE;
        else
                flags &= ~CCR_CACHE_EMODE;
#endif

#if defined(CONFIG_CACHE_WRITETHROUGH)
        /* Write-through */
        flags |= CCR_CACHE_WT;
#elif defined(CONFIG_CACHE_WRITEBACK)
        /* Write-back */
        flags |= CCR_CACHE_CB;
#else
        /* Off */
        flags &= ~CCR_CACHE_ENABLE;
#endif

        l2_cache_init();

        __raw_writel(flags, CCR);
        back_to_cached();
}
#else
#define cache_init()    do { } while (0)
#endif

#define CSHAPE(totalsize, linesize, assoc) \
        ((totalsize & ~0xff) | (linesize << 4) | assoc)

#define CACHE_DESC_SHAPE(desc)  \
        CSHAPE((desc).way_size * (desc).ways, ilog2((desc).linesz), (desc).ways)

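/*
 * Worked example with hypothetical numbers: a 32KB, 4-way cache with
 * 32-byte lines encodes as (32768 & ~0xff) | (ilog2(32) << 4) | 4 =
 * 0x8000 | 0x50 | 0x4 = 0x8054, i.e. the low byte carries log2(line
 * size) and the associativity, the remaining bits the total size.
 * These shape values are what gets exported to userspace via the ELF
 * auxiliary vector (hence the <asm/elf.h> include above).
 */
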
static void detect_cache_shape(void)
{
        l1d_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.dcache);

        if (current_cpu_data.dcache.flags & SH_CACHE_COMBINED)
                l1i_cache_shape = l1d_cache_shape;
        else
                l1i_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.icache);

        if (current_cpu_data.flags & CPU_HAS_L2_CACHE)
                l2_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.scache);
        else
                l2_cache_shape = -1; /* No S-cache */
}

static void __cpuinit fpu_init(void)
{
        /* Disable the FPU */
        if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) {
                printk("FPU Disabled\n");
                current_cpu_data.flags &= ~CPU_HAS_FPU;
        }

        disable_fpu();
        clear_used_math();
}

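/*
 * Note that disable_fpu() above runs unconditionally: the FPU is left
 * disabled (SR.FD set) until a task actually uses it, at which point
 * the FPU disable fault handler sets up its FP context lazily.
 */
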
#ifdef CONFIG_SH_DSP
static void __cpuinit release_dsp(void)
{
        unsigned long sr;

        /* Clear SR.DSP bit */
        __asm__ __volatile__ (
                "stc\tsr, %0\n\t"
                "and\t%1, %0\n\t"
                "ldc\t%0, sr\n\t"
                : "=&r" (sr)
                : "r" (~SR_DSP)
        );
}

static void __cpuinit dsp_init(void)
{
        unsigned long sr;

        /*
         * Set the SR.DSP bit, wait for one instruction, and then read
         * back the SR value.
         */
        __asm__ __volatile__ (
                "stc\tsr, %0\n\t"
                "or\t%1, %0\n\t"
                "ldc\t%0, sr\n\t"
                "nop\n\t"
                "stc\tsr, %0\n\t"
                : "=&r" (sr)
                : "r" (SR_DSP)
        );

        /* If the DSP bit is still set, this CPU has a DSP */
        if (sr & SR_DSP)
                current_cpu_data.flags |= CPU_HAS_DSP;

        /* Disable the DSP */
        if (dsp_disabled && (current_cpu_data.flags & CPU_HAS_DSP)) {
                printk("DSP Disabled\n");
                current_cpu_data.flags &= ~CPU_HAS_DSP;
        }

        /* Now that we've determined the DSP status, clear the DSP bit. */
        release_dsp();
}
#else
static inline void __cpuinit dsp_init(void) { }
#endif /* CONFIG_SH_DSP */

/**
 * cpu_init
 *
 * This is our initial entry point for each CPU, and is invoked on the
 * boot CPU prior to calling start_kernel(). For SMP, a combination of
 * this and start_secondary() will bring up each processor to a ready
 * state prior to handing off to the idle loop.
 *
 * We do all of the basic processor init here, including setting up
 * the caches, FPU, DSP, etc. By the time start_kernel() is hit (and
 * subsequently platform_setup()) things like determining the CPU
 * subtype and initial configuration will all be done.
 *
 * Each processor family is still responsible for doing its own probing
 * and cache configuration in cpu_probe().
 */
asmlinkage void __cpuinit cpu_init(void)
{
        current_thread_info()->cpu = hard_smp_processor_id();

        /* First, probe the CPU */
        cpu_probe();

        if (current_cpu_data.type == CPU_SH_NONE)
                panic("Unknown CPU");

        /* First setup the rest of the I-cache info */
        current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
                                      current_cpu_data.icache.linesz;

        current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
                                    current_cpu_data.icache.linesz;

        /* And the D-cache too */
        current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
                                      current_cpu_data.dcache.linesz;

        current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
                                    current_cpu_data.dcache.linesz;
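
        /*
         * Example with hypothetical numbers: with way_incr = 0x2000 and
         * 32-byte lines, entry_mask = 0x2000 - 0x20 = 0x1fe0, a mask for
         * the set-index bits within a single way; with 256 sets,
         * way_size = 256 * 32 = 8KB.
         */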

        /* Init the cache */
        cache_init();

        if (raw_smp_processor_id() == 0) {
                shm_align_mask = max_t(unsigned long,
                                       current_cpu_data.dcache.way_size - 1,
                                       PAGE_SIZE - 1);
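
                /*
                 * Aligning shared mappings to the D-cache way size keeps
                 * different virtual mappings of the same page from
                 * indexing different lines of the virtually indexed cache
                 * (cache aliasing); shm_align_mask is consumed by
                 * arch_get_unmapped_area() when laying out such mappings.
                 */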

                /* Boot CPU sets the cache shape */
                detect_cache_shape();
        }

        fpu_init();
        dsp_init();

        /*
         * Initialize the per-CPU ASID cache very early, since the
         * TLB flushing routines depend on this being setup.
         */
        current_cpu_data.asid_cache = NO_CONTEXT;

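        /*
         * Legacy parts only have a 29-bit physical address space (the
         * fixed P1/P2 segment mappings); parts that can remap physical
         * memory (e.g. through a PMB) may run in 32-bit mode instead,
         * which is what __in_29bit_mode() distinguishes here.
         */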
        current_cpu_data.phys_bits = __in_29bit_mode() ? 29 : 32;

        speculative_execution_init();
        expmask_init();

        /* Do the rest of the boot processor setup */
        if (raw_smp_processor_id() == 0) {
                /* Save off the BIOS VBR, if there is one */
                sh_bios_vbr_init();

                /*
                 * Setup VBR for boot CPU. Secondary CPUs do this through
                 * start_secondary().
                 */
                per_cpu_trap_init();

                /*
                 * Boot processor sets up the FP and extended state
                 * context info.
                 */
                init_thread_xstate();
        }
}