/*
 * CPU initialization code (SuperH).
 *
 * First-level per-CPU bring-up: CPU probe, cache descriptor fix-up and
 * cache enable, FPU/DSP detection, and boot-CPU trap/xstate setup.
 */
13#include <linux/init.h>
14#include <linux/kernel.h>
15#include <linux/mm.h>
16#include <linux/log2.h>
17#include <asm/mmu_context.h>
18#include <asm/processor.h>
19#include <linux/uaccess.h>
20#include <asm/page.h>
21#include <asm/cacheflush.h>
22#include <asm/cache.h>
23#include <asm/elf.h>
24#include <asm/io.h>
25#include <asm/smp.h>
26#include <asm/sh_bios.h>
27#include <asm/setup.h>
28
/* Fold the Kconfig FPU/DSP selections down to compile-time constants. */
#ifdef CONFIG_SH_FPU
#define cpu_has_fpu 1
#else
#define cpu_has_fpu 0
#endif

#ifdef CONFIG_SH_DSP
#define cpu_has_dsp 1
#else
#define cpu_has_dsp 0
#endif

/*
 * Generate the "nofpu"/"nodsp" command line options.
 *
 * For each unit x this emits an x_disabled flag (already set when the
 * unit isn't configured in at all) and a __setup("nox") handler that
 * forces the unit off at boot.  fpu_init()/dsp_init() below honour the
 * flag.
 */
#define onchip_setup(x)					\
static int x##_disabled = !cpu_has_##x;			\
							\
static int x##_setup(char *opts)			\
{							\
	x##_disabled = 1;				\
	return 1;					\
}							\
__setup("no" __stringify(x), x##_setup);

onchip_setup(fpu);
onchip_setup(dsp);
57
#ifdef CONFIG_SPECULATIVE_EXECUTION
/* CPU operation mode register and its read-ahead disable bit. */
#define CPUOPM 0xff2f0000
#define CPUOPM_RABD (1 << 5)

static void speculative_execution_init(void)
{
	/* Clear RABD */
	__raw_writel(__raw_readl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);

	/* Flush the update: read back, then serialize with a barrier. */
	(void)__raw_readl(CPUOPM);
	ctrl_barrier();
}
#else
#define speculative_execution_init() do { } while (0)
#endif
74
#ifdef CONFIG_CPU_SH4A
/* SH-4A exception mask register and the bits we manage in it. */
#define EXPMASK 0xff2f0004
#define EXPMASK_RTEDS (1 << 0)
#define EXPMASK_BRDSSLP (1 << 1)
#define EXPMASK_MMCAW (1 << 4)

static void expmask_init(void)
{
	unsigned long expmask = __raw_readl(EXPMASK);

	/*
	 * Future proofing.
	 *
	 * Disable support for slottable sleep instruction, non-nop
	 * instructions in the rte delay slot, and associative writes to
	 * the memory-mapped cache array.
	 */
	expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP | EXPMASK_MMCAW);

	__raw_writel(expmask, EXPMASK);
	ctrl_barrier();
}
#else
#define expmask_init() do { } while (0)
#endif
100
101
/*
 * 2nd-level cache init.
 *
 * Weak no-op default; CPUs with an L2 override this with their own
 * implementation.  Called from cache_init() before the CCR write.
 */
void __attribute__ ((weak)) l2_cache_init(void)
{
}
105
106
107
108
/*
 * Generic first-level cache init: purge anything dirty, then program
 * CCR from scratch.  Must run from the uncached mapping since it
 * reconfigures the cache underneath itself.
 */
#if defined(CONFIG_SUPERH32) && !defined(CONFIG_CPU_J2)
static void cache_init(void)
{
	unsigned long ccr, flags;

	jump_to_uncached();
	ccr = __raw_readl(SH_CCR);

	/*
	 * At this point we don't know whether the cache is enabled or not -
	 * a bootloader may have enabled it.  There are at least 2 things
	 * that could be dirty in the cache at this point:
	 *
	 *  1. kernel command line set up by boot loader
	 *  2. spilled registers from the prolog of this function
	 *
	 * => before re-initialising the cache, we must do a purge of the
	 * whole cache out to memory for safety.  As long as nothing is
	 * spilled during the loop to lines that have already been done,
	 * this is safe.
	 */
	if (ccr & CCR_CACHE_ENABLE) {
		unsigned long ways, waysize, addrstart;

		waysize = current_cpu_data.dcache.sets;

#ifdef CCR_CACHE_ORA
		/*
		 * If the OC is already in RAM mode, we only have
		 * half of the entries to flush..
		 */
		if (ccr & CCR_CACHE_ORA)
			waysize >>= 1;
#endif

		waysize <<= current_cpu_data.dcache.entry_shift;

#ifdef CCR_CACHE_EMODE
		/* If EMODE is not set, we only have 1 way to flush. */
		if (!(ccr & CCR_CACHE_EMODE))
			ways = 1;
		else
#endif
			ways = current_cpu_data.dcache.ways;

		/* Zero every line of each way via the cache address array. */
		addrstart = CACHE_OC_ADDRESS_ARRAY;
		do {
			unsigned long addr;

			for (addr = addrstart;
			     addr < addrstart + waysize;
			     addr += current_cpu_data.dcache.linesz)
				__raw_writel(0, addr);

			addrstart += current_cpu_data.dcache.way_incr;
		} while (--ways);
	}

	/*
	 * Default CCR values .. enable the caches
	 * and invalidate them immediately..
	 */
	flags = CCR_CACHE_ENABLE | CCR_CACHE_INVALIDATE;

#ifdef CCR_CACHE_EMODE
	/* Force EMODE if possible */
	if (current_cpu_data.dcache.ways > 1)
		flags |= CCR_CACHE_EMODE;
	else
		flags &= ~CCR_CACHE_EMODE;
#endif

#if defined(CONFIG_CACHE_WRITETHROUGH)
	/* Write-through */
	flags |= CCR_CACHE_WT;
#elif defined(CONFIG_CACHE_WRITEBACK)
	/* Write-back */
	flags |= CCR_CACHE_CB;
#else
	/* Off */
	flags &= ~CCR_CACHE_ENABLE;
#endif

	l2_cache_init();

	__raw_writel(flags, SH_CCR);
	back_to_cached();
}
#else
#define cache_init() do { } while (0)
#endif
198
/*
 * Pack a cache description into a single "cache shape" word: total size
 * with the low byte cleared, log2 of the line size shifted into bits
 * 7:4, and the associativity in the low bits.
 *
 * All macro arguments are parenthesized so that argument expressions
 * containing operators of lower precedence than '&'/'<<'/'|' expand
 * correctly.
 */
#define CSHAPE(totalsize, linesize, assoc) \
	(((totalsize) & ~0xff) | ((linesize) << 4) | (assoc))

/* Shape word for one of the current_cpu_data cache descriptors. */
#define CACHE_DESC_SHAPE(desc) \
	CSHAPE((desc).way_size * (desc).ways, ilog2((desc).linesz), (desc).ways)
204
205static void detect_cache_shape(void)
206{
207 l1d_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.dcache);
208
209 if (current_cpu_data.dcache.flags & SH_CACHE_COMBINED)
210 l1i_cache_shape = l1d_cache_shape;
211 else
212 l1i_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.icache);
213
214 if (current_cpu_data.flags & CPU_HAS_L2_CACHE)
215 l2_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.scache);
216 else
217 l2_cache_shape = -1;
218}
219
220static void fpu_init(void)
221{
222
223 if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) {
224 printk("FPU Disabled\n");
225 current_cpu_data.flags &= ~CPU_HAS_FPU;
226 }
227
228 disable_fpu();
229 clear_used_math();
230}
231
#ifdef CONFIG_SH_DSP
/* Clear SR.DSP: read SR, mask off the DSP bit, write it back. */
static void release_dsp(void)
{
	unsigned long sr;

	__asm__ __volatile__ (
		"stc\tsr, %0\n\t"
		"and\t%1, %0\n\t"
		"ldc\t%0, sr\n\t"
		: "=&r" (sr)
		: "r" (~SR_DSP)
	);
}
246
/*
 * Probe for a DSP unit and record the result, honouring the "nodsp"
 * command line option.
 */
static void dsp_init(void)
{
	unsigned long sr;

	/*
	 * Set the SR.DSP bit, wait one instruction, then read SR back:
	 * if the bit didn't stick, there is no DSP on this part.
	 */
	__asm__ __volatile__ (
		"stc\tsr, %0\n\t"
		"or\t%1, %0\n\t"
		"ldc\t%0, sr\n\t"
		"nop\n\t"
		"stc\tsr, %0\n\t"
		: "=&r" (sr)
		: "r" (SR_DSP)
	);

	/* If the DSP bit is still set, this CPU has a DSP */
	if (sr & SR_DSP)
		current_cpu_data.flags |= CPU_HAS_DSP;

	/* Disable the DSP when "nodsp" was given on the command line. */
	if (dsp_disabled && (current_cpu_data.flags & CPU_HAS_DSP)) {
		printk("DSP Disabled\n");
		current_cpu_data.flags &= ~CPU_HAS_DSP;
	}

	/* Now that we've determined the DSP status, clear the DSP bit. */
	release_dsp();
}
#else
static inline void dsp_init(void) { }
#endif
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
/*
 * Generic first-level CPU init: probe the CPU, finish off the cache
 * descriptors, bring up the caches, FPU and DSP, and do the boot-CPU
 * only trap/xstate setup.  Runs once on every CPU early in boot.
 */
asmlinkage void cpu_init(void)
{
	current_thread_info()->cpu = hard_smp_processor_id();

	/* First, probe the CPU */
	cpu_probe();

	if (current_cpu_data.type == CPU_SH_NONE)
		panic("Unknown CPU");

	/* First setup the rest of the I-cache info */
	current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
				      current_cpu_data.icache.linesz;

	current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
				    current_cpu_data.icache.linesz;

	/* And the D-cache too */
	current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
				      current_cpu_data.dcache.linesz;

	current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
				    current_cpu_data.dcache.linesz;

	/* Init the cache */
	cache_init();

	if (raw_smp_processor_id() == 0) {
#ifdef CONFIG_MMU
		/* Colour shared mappings on at least a cache-way boundary. */
		shm_align_mask = max_t(unsigned long,
				       current_cpu_data.dcache.way_size - 1,
				       PAGE_SIZE - 1);
#else
		shm_align_mask = PAGE_SIZE - 1;
#endif

		/* Boot CPU sets the cache shape */
		detect_cache_shape();
	}

	fpu_init();
	dsp_init();

	/*
	 * Initialize the per-CPU ASID cache very early, since the
	 * TLB flushing routines depend on this being setup.
	 */
	current_cpu_data.asid_cache = NO_CONTEXT;

	current_cpu_data.phys_bits = __in_29bit_mode() ? 29 : 32;

	speculative_execution_init();
	expmask_init();

	/* Do the rest of the boot processor setup */
	if (raw_smp_processor_id() == 0) {
		/* Save off the BIOS VBR, if there is one */
		sh_bios_vbr_init();

		/*
		 * Setup VBR for boot CPU. Secondary CPUs do this through
		 * start_secondary().
		 */
		per_cpu_trap_init();

		/*
		 * Boot processor to setup the FP and extended state
		 * context info.
		 */
		init_thread_xstate();
	}
}
370