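/*
 * Cache maintenance for the MicroBlaze instruction and data caches:
 * enable/disable helpers plus flush and invalidate routines.  The set of
 * routines actually used is chosen at boot in microblaze_cache_init()
 * from the cache configuration described by cpuinfo.
 */
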
#include <asm/cacheflush.h>
#include <linux/cache.h>
#include <asm/cpuinfo.h>
#include <asm/pvr.h>

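/*
 * Cache enable/disable helpers.  The *_msr variants toggle the MSR cache
 * enable bits directly with the msrset/msrclr instructions; the *_nomsr
 * variants do a read-modify-write of rmsr for CPUs that lack them.
 */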
static inline void __enable_icache_msr(void)
{
	__asm__ __volatile__ (" msrset r0, %0;" \
			"nop;" \
			: : "i" (MSR_ICE) : "memory");
}

static inline void __disable_icache_msr(void)
{
	__asm__ __volatile__ (" msrclr r0, %0;" \
			"nop;" \
			: : "i" (MSR_ICE) : "memory");
}

static inline void __enable_dcache_msr(void)
{
	__asm__ __volatile__ (" msrset r0, %0;" \
			"nop;" \
			: : "i" (MSR_DCE) : "memory");
}

static inline void __disable_dcache_msr(void)
{
	__asm__ __volatile__ (" msrclr r0, %0;" \
			"nop;" \
			: : "i" (MSR_DCE) : "memory");
}

static inline void __enable_icache_nomsr(void)
{
	__asm__ __volatile__ (" mfs r12, rmsr;" \
			"nop;" \
			"ori r12, r12, %0;" \
			"mts rmsr, r12;" \
			"nop;" \
			: : "i" (MSR_ICE) : "memory", "r12");
}

static inline void __disable_icache_nomsr(void)
{
	__asm__ __volatile__ (" mfs r12, rmsr;" \
			"nop;" \
			"andi r12, r12, ~%0;" \
			"mts rmsr, r12;" \
			"nop;" \
			: : "i" (MSR_ICE) : "memory", "r12");
}

static inline void __enable_dcache_nomsr(void)
{
	__asm__ __volatile__ (" mfs r12, rmsr;" \
			"nop;" \
			"ori r12, r12, %0;" \
			"mts rmsr, r12;" \
			"nop;" \
			: : "i" (MSR_DCE) : "memory", "r12");
}

static inline void __disable_dcache_nomsr(void)
{
	__asm__ __volatile__ (" mfs r12, rmsr;" \
			"nop;" \
			"andi r12, r12, ~%0;" \
			"mts rmsr, r12;" \
			"nop;" \
			: : "i" (MSR_DCE) : "memory", "r12");
}
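
/*
 * Clamp a flush/invalidate range: never walk more than one cache size of
 * addresses, and align the start address down to a cache line boundary.
 */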
#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size) \
do { \
	int align = ~(cache_line_length - 1); \
	end = min(start + cache_size, end); \
	start &= align; \
} while (0)
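
/*
 * Issue a cache op over one full cache size of addresses, from
 * (cache_size - line_length) down to 0, one line per iteration; the
 * decrement sits in the bgtid delay slot.
 */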
#define CACHE_ALL_LOOP(cache_size, line_length, op) \
do { \
	unsigned int len = cache_size - line_length; \
	int step = -line_length; \
	WARN_ON(step >= 0); \
	\
	__asm__ __volatile__ (" 1: " #op " %0, r0;" \
			"bgtid %0, 1b;" \
			"addk %0, %0, %1;" \
			: : "r" (len), "r" (step) \
			: "memory"); \
} while (0)
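
/*
 * Range loop used by the write-back dcache routines: the op's effective
 * address is start + count, so the range is covered from the last line
 * down to 'start', one cache line per iteration.  'end' is first pulled
 * back onto the start of the last line inside the range.
 */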
#define CACHE_RANGE_LOOP_2(start, end, line_length, op) \
do { \
	int step = -line_length; \
	int align = ~(line_length - 1); \
	int count; \
	end = ((end & align) == end) ? end - line_length : end & align; \
	count = end - start; \
	WARN_ON(count < 0); \
	\
	__asm__ __volatile__ (" 1: " #op " %0, %1;" \
			"bgtid %1, 1b;" \
			"addk %1, %1, %2;" \
			: : "r" (start), "r" (count), \
			"r" (step) : "memory"); \
} while (0)
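
/*
 * Range loop for write-through caches: walk from 'start' up to the
 * line-aligned 'end', comparing with cmpu and advancing the address in
 * the bgtid delay slot.
 */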
#define CACHE_RANGE_LOOP_1(start, end, line_length, op) \
do { \
	unsigned int volatile temp = 0; \
	unsigned int align = ~(line_length - 1); \
	end = ((end & align) == end) ? end - line_length : end & align; \
	WARN_ON(end < start); \
	\
	__asm__ __volatile__ (" 1: " #op " %1, r0;" \
			"cmpu %0, %1, %2;" \
			"bgtid %0, 1b;" \
			"addk %1, %1, %3;" \
			: : "r" (temp), "r" (start), "r" (end), \
			"r" (line_length) : "memory"); \
} while (0)

/* Use the hand-written assembly loops above; undefine to fall back to the C loops. */
#define ASM_LOOP
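
/*
 * Flush a range of the instruction cache with wic.  The *_msr_irq and
 * *_nomsr_irq variants disable the icache and mask interrupts while the
 * loop runs; the *_noirq variant leaves the cache state alone.
 */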
static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_msr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic %0, r0;" \
				: : "r" (i));
#endif
	__enable_icache_msr();
	local_irq_restore(flags);
}

static void __flush_icache_range_nomsr_irq(unsigned long start,
				unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_nomsr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic %0, r0;" \
				: : "r" (i));
#endif

	__enable_icache_nomsr();
	local_irq_restore(flags);
}

static void __flush_icache_range_noirq(unsigned long start,
				unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic %0, r0;" \
				: : "r" (i));
#endif
}

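/*
 * Flush the entire instruction cache by walking one cache size of
 * addresses with wic.  The *_irq variants disable the icache and mask
 * interrupts for the duration.
 */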
static void __flush_icache_all_msr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_msr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
			i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic %0, r0;" \
				: : "r" (i));
#endif
	__enable_icache_msr();
	local_irq_restore(flags);
}

static void __flush_icache_all_nomsr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_nomsr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
			i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic %0, r0;" \
				: : "r" (i));
#endif
	__enable_icache_nomsr();
	local_irq_restore(flags);
}

static void __flush_icache_all_noirq(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
			i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic %0, r0;" \
				: : "r" (i));
#endif
}

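/*
 * Invalidate the whole write-through data cache with wdc.  As above, the
 * *_irq variants disable the dcache and mask interrupts around the walk.
 */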
static void __invalidate_dcache_all_msr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_msr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
			i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc %0, r0;" \
				: : "r" (i));
#endif
	__enable_dcache_msr();
	local_irq_restore(flags);
}

static void __invalidate_dcache_all_nomsr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_nomsr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
			i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc %0, r0;" \
				: : "r" (i));
#endif
	__enable_dcache_nomsr();
	local_irq_restore(flags);
}

static void __invalidate_dcache_all_noirq_wt(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
			i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc %0, r0;" \
				: : "r" (i));
#endif
}

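/*
 * Write-back data cache invalidation: the whole-cache variant walks one
 * cache size of addresses with plain wdc, while the range variant below
 * uses wdc.clear.
 */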
static void __invalidate_dcache_all_wb(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
			wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
			i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc %0, r0;" \
				: : "r" (i));
#endif
}

static void __invalidate_dcache_range_wb(unsigned long start,
				unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.clear %0, r0;" \
				: : "r" (i));
#endif
}

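/*
 * Write-through data cache range invalidation with plain wdc; the
 * msr_irq/nomsr_irq variants disable the dcache and mask interrupts
 * while looping.
 */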
static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
				unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int)end);
	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc %0, r0;" \
				: : "r" (i));
#endif
}

static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
				unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int)end);
	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_msr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc %0, r0;" \
				: : "r" (i));
#endif

	__enable_dcache_msr();
	local_irq_restore(flags);
}

static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
				unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_nomsr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc %0, r0;" \
				: : "r" (i));
#endif

	__enable_dcache_nomsr();
	local_irq_restore(flags);
}

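/*
 * Write-back flush routines: wdc.flush writes dirty lines back to memory
 * before invalidating them.
 */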
static void __flush_dcache_all_wb(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
			wdc.flush);
#else
	for (i = 0; i < cpuinfo.dcache_size;
			i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.flush %0, r0;" \
				: : "r" (i));
#endif
}

static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.flush %0, r0;" \
				: : "r" (i));
#endif
}
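
/* Cache operation table for this CPU, chosen in microblaze_cache_init(). */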
struct scache *mbc;
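
/*
 * Operation tables: wb/wt selects write-back vs write-through data cache
 * routines, msr/nomsr selects whether msrset/msrclr are available, and
 * noirq marks configurations whose routines run without disabling the
 * cache or masking interrupts.
 */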
static const struct scache wb_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};

static const struct scache wb_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};

static const struct scache wt_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_msr_irq,
	.iflr = __flush_icache_range_msr_irq,
	.iin = __flush_icache_all_msr_irq,
	.iinr = __flush_icache_range_msr_irq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_msr_irq,
	.dflr = __invalidate_dcache_range_msr_irq_wt,
	.din = __invalidate_dcache_all_msr_irq,
	.dinr = __invalidate_dcache_range_msr_irq_wt,
};

static const struct scache wt_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_nomsr_irq,
	.iflr = __flush_icache_range_nomsr_irq,
	.iin = __flush_icache_all_nomsr_irq,
	.iinr = __flush_icache_range_nomsr_irq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_nomsr_irq,
	.dflr = __invalidate_dcache_range_nomsr_irq,
	.din = __invalidate_dcache_all_nomsr_irq,
	.dinr = __invalidate_dcache_range_nomsr_irq,
};

static const struct scache wt_msr_noirq = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};

static const struct scache wt_nomsr_noirq = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};
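
/*
 * PVR version codes for MicroBlaze v7.20.a and v7.20.d, used below to
 * pick the noirq variants on newer cores and to warn when write-back
 * mode is known not to work.
 */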
#define CPUVER_7_20_A	0x0c
#define CPUVER_7_20_D	0x0f

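/*
 * Pick the operation table matching the detected cache configuration,
 * then enable the data cache and invalidate + enable the instruction
 * cache.
 */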
void microblaze_cache_init(void)
{
	if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
		if (cpuinfo.dcache_wb) {
			pr_info("wb_msr\n");
			mbc = (struct scache *)&wb_msr;
			if (cpuinfo.ver_code <= CPUVER_7_20_D)
				pr_info("WB won't work properly\n");
		} else {
			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
				pr_info("wt_msr_noirq\n");
				mbc = (struct scache *)&wt_msr_noirq;
			} else {
				pr_info("wt_msr\n");
				mbc = (struct scache *)&wt_msr;
			}
		}
	} else {
		if (cpuinfo.dcache_wb) {
			pr_info("wb_nomsr\n");
			mbc = (struct scache *)&wb_nomsr;
			if (cpuinfo.ver_code <= CPUVER_7_20_D)
				pr_info("WB won't work properly\n");
		} else {
			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
				pr_info("wt_nomsr_noirq\n");
				mbc = (struct scache *)&wt_nomsr_noirq;
			} else {
				pr_info("wt_nomsr\n");
				mbc = (struct scache *)&wt_nomsr;
			}
		}
	}

	enable_dcache();

	invalidate_icache();
	enable_icache();
}