#include <config.h>
#include <linux/linkage.h>
#include <linux/sizes.h>
#include <asm/system.h>

#ifdef CONFIG_SYS_THUMB_BUILD
#define ARM(x...)
#define THUMB(x...)	x
#else
#define ARM(x...)	x
#define THUMB(x...)
#endif
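
/*
 * ARM() and THUMB() select the instruction sequence for the build:
 * Thumb-2 has no register-shifted-register form of orr, so the Thumb
 * paths below split each such orr into an lsl followed by an orr,
 * using r6 as scratch.
 */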

/*
 *	v7_flush_dcache_all()
 *
 *	Clean and invalidate the entire D-cache hierarchy, level by
 *	level, up to the Level of Coherency, using set/way operations.
 *
 *	Corrupted registers: r0-r7, r9-r11
 */
ENTRY(__v7_flush_dcache_all)
	dmb					@ ensure ordering with previous memory accesses
	mrc	p15, 1, r0, c0, c0, 1		@ read clidr
	mov	r3, r0, lsr #23			@ move LoC into position
	ands	r3, r3, #7 << 1			@ extract loc*2 from clidr
	beq	finished			@ if loc is 0, then no need to clean
start_flush_levels:
	mov	r10, #0				@ start clean at cache level 0
flush_levels:
	add	r2, r10, r10, lsr #1		@ work out 3x current cache level
	mov	r1, r0, lsr r2			@ extract cache type bits from clidr
	and	r1, r1, #7			@ mask of the bits for current cache only
	cmp	r1, #2				@ see what cache we have at this level
	blt	skip				@ skip if no cache, or just i-cache
	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
	isb					@ isb to sync the new cssr & csidr
	mrc	p15, 1, r1, c0, c0, 0		@ read the new csidr
	and	r2, r1, #7			@ extract the length of the cache lines
	add	r2, r2, #4			@ add 4 (line length offset)
	movw	r4, #0x3ff
	ands	r4, r4, r1, lsr #3		@ find maximum number on the way size
	clz	r5, r4				@ find bit position of way size increment
	movw	r7, #0x7fff
	ands	r7, r7, r1, lsr #13		@ extract max number of the index size
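	/*
	 * r11 below is the set/way operand: the way index lands in the
	 * top bits (position found with clz), the set index is shifted
	 * left by the line-size field in r2, and the cache level sits
	 * in bits [3:1].
	 */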
loop1:
	mov	r9, r7				@ create working copy of max index
loop2:
	ARM(	orr	r11, r10, r4, lsl r5	)	@ factor way and cache number into r11
	THUMB(	lsl	r6, r4, r5		)
	THUMB(	orr	r11, r10, r6		)	@ factor way and cache number into r11
	ARM(	orr	r11, r11, r9, lsl r2	)	@ factor index number into r11
	THUMB(	lsl	r6, r9, r2		)
	THUMB(	orr	r11, r11, r6		)	@ factor index number into r11
	mcr	p15, 0, r11, c7, c14, 2		@ clean & invalidate by set/way
	subs	r9, r9, #1			@ decrement the index
	bge	loop2
	subs	r4, r4, #1			@ decrement the way
	bge	loop1
skip:
	add	r10, r10, #2			@ increment cache number
	cmp	r3, r10
	bgt	flush_levels
finished:
	mov	r10, #0				@ switch back to cache level 0
	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
	dsb	st
	isb
	bx	lr
ENDPROC(__v7_flush_dcache_all)

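/*
 * C-callable wrapper: __v7_flush_dcache_all corrupts callee-saved
 * registers, so save and restore them around the call per the AAPCS.
 */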
ENTRY(v7_flush_dcache_all)
	ARM(	stmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
	THUMB(	stmfd	sp!, {r4-r7, r9-r11, lr}	)
	bl	__v7_flush_dcache_all
	ARM(	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
	THUMB(	ldmfd	sp!, {r4-r7, r9-r11, lr}	)
	bx	lr
ENDPROC(v7_flush_dcache_all)

/*
 *	v7_invalidate_dcache_all()
 *
 *	Invalidate the entire D-cache hierarchy, level by level, up to
 *	the Level of Coherency, using set/way operations. Same walk as
 *	__v7_flush_dcache_all, but with the invalidate-only operation
 *	(c7, c6, 2) in place of clean & invalidate (c7, c14, 2).
 *
 *	Corrupted registers: r0-r7, r9-r11
 */
ENTRY(__v7_invalidate_dcache_all)
	dmb					@ ensure ordering with previous memory accesses
	mrc	p15, 1, r0, c0, c0, 1		@ read clidr
	mov	r3, r0, lsr #23			@ move LoC into position
	ands	r3, r3, #7 << 1			@ extract loc*2 from clidr
	beq	inval_finished			@ if loc is 0, then no need to invalidate
	mov	r10, #0				@ start at cache level 0
inval_levels:
	add	r2, r10, r10, lsr #1		@ work out 3x current cache level
	mov	r1, r0, lsr r2			@ extract cache type bits from clidr
	and	r1, r1, #7			@ mask of the bits for current cache only
	cmp	r1, #2				@ see what cache we have at this level
	blt	inval_skip			@ skip if no cache, or just i-cache
	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
	isb					@ isb to sync the new cssr & csidr
	mrc	p15, 1, r1, c0, c0, 0		@ read the new csidr
	and	r2, r1, #7			@ extract the length of the cache lines
	add	r2, r2, #4			@ add 4 (line length offset)
	movw	r4, #0x3ff
	ands	r4, r4, r1, lsr #3		@ find maximum number on the way size
	clz	r5, r4				@ find bit position of way size increment
	movw	r7, #0x7fff
	ands	r7, r7, r1, lsr #13		@ extract max number of the index size
inval_loop1:
	mov	r9, r7				@ create working copy of max index
inval_loop2:
	ARM(	orr	r11, r10, r4, lsl r5	)	@ factor way and cache number into r11
	THUMB(	lsl	r6, r4, r5		)
	THUMB(	orr	r11, r10, r6		)	@ factor way and cache number into r11
	ARM(	orr	r11, r11, r9, lsl r2	)	@ factor index number into r11
	THUMB(	lsl	r6, r9, r2		)
	THUMB(	orr	r11, r11, r6		)	@ factor index number into r11
	mcr	p15, 0, r11, c7, c6, 2		@ invalidate by set/way
	subs	r9, r9, #1			@ decrement the index
	bge	inval_loop2
	subs	r4, r4, #1			@ decrement the way
	bge	inval_loop1
inval_skip:
	add	r10, r10, #2			@ increment cache number
	cmp	r3, r10
	bgt	inval_levels
inval_finished:
	mov	r10, #0				@ switch back to cache level 0
	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
	dsb	st
	isb
	bx	lr
ENDPROC(__v7_invalidate_dcache_all)

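/*
 * C-callable wrapper, mirroring v7_flush_dcache_all above: preserves
 * the callee-saved registers that __v7_invalidate_dcache_all corrupts.
 */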
ENTRY(v7_invalidate_dcache_all)
	ARM(	stmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
	THUMB(	stmfd	sp!, {r4-r7, r9-r11, lr}	)
	bl	__v7_invalidate_dcache_all
	ARM(	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
	THUMB(	ldmfd	sp!, {r4-r7, r9-r11, lr}	)
	bx	lr
ENDPROC(v7_invalidate_dcache_all)
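
/*
 * Typical call site, as a sketch: set/way maintenance is only reliable
 * while the D-cache is not allocating, so a caller would normally run
 * with the cache still disabled at early boot, or clear SCTLR.C first:
 *
 *	mrc	p15, 0, r0, c1, c0, 0	@ read SCTLR
 *	bic	r0, r0, #(1 << 2)	@ clear the C (D-cache enable) bit
 *	mcr	p15, 0, r0, c1, c0, 0
 *	bl	v7_invalidate_dcache_all
 */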