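/*
 * L1 instruction/data cache maintenance: per-page, per-range and
 * whole-cache flush routines, plus the protection_map setup used by
 * the generic mm code.
 */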
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>

#include <asm/mmu_context.h>

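/*
 * Flush the D-cache lines covering one page of kernel virtual memory,
 * one L1 cache line at a time.  @addr must be cache-line aligned; the
 * I-cache is not touched, so pages holding code need a separate
 * I-cache flush.
 */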
static void flush_data_cache_page(unsigned long addr)
{
	unsigned int i;

	for (i = 0; i < PAGE_SIZE; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
			"cache 0x0e, [%0, 0]\n"
			"cache 0x1a, [%0, 0]\n"
			"nop\n"
			: : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}

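/*
 * Keep the D-cache coherent for a page-cache page the kernel has
 * written.  If the page is not mapped into user space yet, the flush is
 * deferred by setting PG_dcache_dirty; otherwise the page is flushed
 * through its kernel mapping right away.
 */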
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (PageHighMem(page))
		return;
	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}
EXPORT_SYMBOL(flush_dcache_page);

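/*
 * Complete a flush deferred by flush_dcache_page(): when the page
 * backing @pte is mapped into @vma, flush its kernel mapping if the
 * mapping is executable, then clear PG_dcache_dirty.
 */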
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC);

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
		addr = (unsigned long) page_address(page);
		if (exec)
			flush_data_cache_page(addr);
		clear_bit(PG_dcache_dirty, &page->flags);
	}
}

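/*
 * protection_map[] translates the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED
 * bits of a vma into page protections; entries 0-7 cover private
 * mappings, entries 8-15 shared ones.
 */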
static inline void setup_protection_map(void)
{
	protection_map[0] = PAGE_NONE;
	protection_map[1] = PAGE_READONLY;
	protection_map[2] = PAGE_COPY;
	protection_map[3] = PAGE_COPY;
	protection_map[4] = PAGE_READONLY;
	protection_map[5] = PAGE_READONLY;
	protection_map[6] = PAGE_COPY;
	protection_map[7] = PAGE_COPY;
	protection_map[8] = PAGE_NONE;
	protection_map[9] = PAGE_READONLY;
	protection_map[10] = PAGE_SHARED;
	protection_map[11] = PAGE_SHARED;
	protection_map[12] = PAGE_READONLY;
	protection_map[13] = PAGE_READONLY;
	protection_map[14] = PAGE_SHARED;
	protection_map[15] = PAGE_SHARED;
}

void cpu_cache_init(void)
{
	setup_protection_map();
}

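/*
 * Whole-cache flush routines.  The "la" loads the routine's own address
 * into r8 purely to provide a valid base register for the cache ops,
 * which is why r8 is listed as clobbered.
 */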
void flush_icache_all(void)
{
	__asm__ __volatile__(
		"la r8, flush_icache_all\n"
		"cache 0x10, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		: : : "r8");
}

void flush_dcache_all(void)
{
	__asm__ __volatile__(
		"la r8, flush_dcache_all\n"
		"cache 0x1f, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		"cache 0x1a, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		: : : "r8");
}

void flush_cache_all(void)
{
	__asm__ __volatile__(
		"la r8, flush_cache_all\n"
		"cache 0x10, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		"cache 0x1f, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		"cache 0x1a, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		: : : "r8");
}

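/*
 * Flush everything used by an address space.  An mm whose context is
 * still zero has not been assigned a context yet, so there is nothing
 * worth flushing.
 */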
void flush_cache_mm(struct mm_struct *mm)
{
	if (!(mm->context))
		return;
	flush_cache_all();
}

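/*
 * Flush a user virtual address range page by page.  Pages that are not
 * present are skipped, and the flush within each page is clamped to
 * @end, since the range may start and end in different pages.
 */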
void flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int exec = vma->vm_flags & VM_EXEC;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (!(mm->context))
		return;

	while (start <= end) {
		unsigned long tmpend;

		pgdp = pgd_offset(mm, start);
		pudp = pud_offset(pgdp, start);
		pmdp = pmd_offset(pudp, start);
		ptep = pte_offset(pmdp, start);

		if (!(pte_val(*ptep) & _PAGE_PRESENT)) {
			start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
			continue;
		}
		tmpend = (start | (PAGE_SIZE - 1)) > end ?
			 end : (start | (PAGE_SIZE - 1));

		flush_dcache_range(start, tmpend);
		if (exec)
			flush_icache_range(start, tmpend);
		start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
	}
}

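/*
 * Flush a single user page, addressed through the kernel segment at
 * 0xa0000000 rather than through the user mapping; the I-cache is only
 * flushed for executable vmas.
 */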
void flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	int exec = vma->vm_flags & VM_EXEC;
	unsigned long kaddr = 0xa0000000 | (pfn << PAGE_SHIFT);

	flush_dcache_range(kaddr, kaddr + PAGE_SIZE);

	if (exec)
		flush_icache_range(kaddr, kaddr + PAGE_SIZE);
}

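/*
 * Flush a freshly written signal trampoline at @addr so the new
 * instructions are fetched correctly: the lines covering @addr and
 * @addr + 4 are maintained on both the I-side and the D-side.
 */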
void flush_cache_sigtramp(unsigned long addr)
{
	__asm__ __volatile__(
		"cache 0x02, [%0, 0]\n"
		"nop\nnop\nnop\nnop\nnop\n"
		"cache 0x02, [%0, 0x4]\n"
		"nop\nnop\nnop\nnop\nnop\n"

		"cache 0x0d, [%0, 0]\n"
		"nop\nnop\nnop\nnop\nnop\n"
		"cache 0x0d, [%0, 0x4]\n"
		"nop\nnop\nnop\nnop\nnop\n"

		"cache 0x1a, [%0, 0]\n"
		"nop\nnop\nnop\nnop\nnop\n"
		: : "r" (addr));
}

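/*
 * Flush the D-cache for the byte range [start, end).  Both addresses
 * are rounded down to L1 cache-line boundaries before the line-by-line
 * walk.
 */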
void flush_dcache_range(unsigned long start, unsigned long end)
{
	int size, i;

	start = start & ~(L1_CACHE_BYTES - 1);
	end = end & ~(L1_CACHE_BYTES - 1);
	size = end - start;

	for (i = 0; i < size; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
			"cache 0x0e, [%0, 0]\n"
			"nop\nnop\nnop\nnop\nnop\n"
			"cache 0x1a, [%0, 0]\n"
			"nop\nnop\nnop\nnop\nnop\n"
			: : "r" (start));
		start += L1_CACHE_BYTES;
	}
}

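/*
 * Flush the I-cache for the byte range [start, end) so that newly
 * written instructions are fetched correctly; addresses are rounded
 * down to L1 cache-line boundaries first.
 */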
void flush_icache_range(unsigned long start, unsigned long end)
{
	int size, i;

	start = start & ~(L1_CACHE_BYTES - 1);
	end = end & ~(L1_CACHE_BYTES - 1);

	size = end - start;

	for (i = 0; i < size; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
			"cache 0x02, [%0, 0]\n"
			"nop\nnop\nnop\nnop\nnop\n"
			: : "r" (start));
		start += L1_CACHE_BYTES;
	}
}
EXPORT_SYMBOL(flush_icache_range);