/*
 * ARM cache flushing for VIVT / VIPT (aliasing and non-aliasing) caches.
 */
10#include <linux/module.h>
11#include <linux/mm.h>
12#include <linux/pagemap.h>
13#include <linux/highmem.h>
14
15#include <asm/cacheflush.h>
16#include <asm/cachetype.h>
17#include <asm/highmem.h>
18#include <asm/smp_plat.h>
19#include <asm/tlbflush.h>
20
21#include "mm.h"
22
23#ifdef CONFIG_CPU_CACHE_VIPT
24
/*
 * Flush the D-cache lines a user mapping of @pfn may occupy, on a VIPT
 * aliasing cache.  The page is temporarily mapped at a kernel alias
 * address with the same cache colour as @vaddr, so the flush hits the
 * same cache sets/ways as the user mapping would.
 */
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	/* alias address carrying the same cache colour as the user address */
	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

	/*
	 * mcrr p15, 0, <end>, <start>, c14: clean+invalidate D-cache by
	 * virtual address range (ARMv6 range operation);
	 * mcr p15, 0, rX, c7, c10, 4: drain the write buffer so the
	 * maintenance completes before we return.
	 */
	asm(	"mcrr p15, 0, %1, %0, c14\n"
	"	mcr p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
}
38
39static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
40{
41 unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
42 unsigned long offset = vaddr & (PAGE_SIZE - 1);
43 unsigned long to;
44
45 set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
46 to = va + offset;
47 flush_icache_range(to, to + len);
48}
49
/*
 * Flush caches when an entire address space is being torn down or
 * switched out.
 *
 * VIVT caches delegate to the VIVT implementation.  VIPT aliasing
 * caches clean+invalidate the entire D-cache and drain the write
 * buffer; VIPT non-aliasing caches need nothing here.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		/*
		 * mcr p15, 0, rX, c7, c14, 0: clean+invalidate entire
		 * D-cache; mcr p15, 0, rX, c7, c10, 4: drain write buffer.
		 */
		asm(	"mcr p15, 0, %0, c7, c14, 0\n"
		"	mcr p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}
65
/*
 * Flush caches for a user virtual address range [start, end) in @vma.
 *
 * VIVT delegates to the VIVT implementation.  VIPT aliasing caches
 * clean+invalidate the whole D-cache (a range walk through aliases
 * would be no cheaper).  Executable ranges additionally flush the
 * whole I-cache.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		/*
		 * mcr p15, 0, rX, c7, c14, 0: clean+invalidate entire
		 * D-cache; mcr p15, 0, rX, c7, c10, 4: drain write buffer.
		 */
		asm(	"mcr p15, 0, %0, c7, c14, 0\n"
		"	mcr p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}
84
85void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
86{
87 if (cache_is_vivt()) {
88 vivt_flush_cache_page(vma, user_addr, pfn);
89 return;
90 }
91
92 if (cache_is_vipt_aliasing()) {
93 flush_pfn_alias(pfn, user_addr);
94 __flush_icache_all();
95 }
96
97 if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
98 __flush_icache_all();
99}
100
#else
/* Without CONFIG_CPU_CACHE_VIPT the alias-flushing helpers are unused. */
#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
#endif
105
/* Cross-CPU callback (see smp_call_function() below): flush this CPU's I-cache. */
static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}
110
/*
 * Make caches coherent after the kernel has written into a user page
 * through its kernel mapping @kaddr (user address @uaddr), as done by
 * copy_to_user_page() for ptrace breakpoint insertion.
 */
static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	/* VIVT: flush only if this CPU is currently using the target mm. */
	if (cache_is_vivt()) {
		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	/* VIPT aliasing D-cache: flush the user-colour alias + whole I-cache. */
	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing: only executable mappings need I-cache work. */
	if (vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			/* I-cache aliases: flush via the user-colour alias. */
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		/*
		 * If cache ops are not broadcast in hardware, tell the
		 * other CPUs to flush their I-caches too.
		 */
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}
141
/*
 * Copy data into a page that is mapped into another process's address
 * space, then make caches coherent for that user mapping.
 *
 * On SMP, preemption is disabled across the copy + flush so that
 * flush_ptrace_access() runs on the same CPU that dirtied the cache
 * lines with memcpy().
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}
162
/*
 * Flush the kernel-side D-cache lines for @page; on a VIPT aliasing
 * cache, additionally flush the alias at the page's user-space colour
 * (derived from page->index) so user mappings see the data.
 */
void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Write back any data associated with the kernel mapping of this
	 * page so the physical page is coherent with the kernel's view.
	 */
	if (!PageHighMem(page)) {
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
	} else {
		void *addr;

		if (cache_is_vipt_nonaliasing()) {
			/* any temporary mapping will do on non-aliasing VIPT */
			addr = kmap_atomic(page);
			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
			kunmap_atomic(addr);
		} else {
			/*
			 * kmap_high_get() returns NULL when the highmem page
			 * has no permanent kernel mapping; in that case there
			 * is nothing mapped to flush.  A non-NULL return must
			 * be balanced with kunmap_high().
			 */
			addr = kmap_high_get(page);
			if (addr) {
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
				kunmap_high(page);
			}
		}
	}

	/*
	 * If this is a page cache page and we have an aliasing VIPT cache,
	 * one flush at the relevant user-space colour (congruent with
	 * page->index) covers the user alias.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
			page->index << PAGE_CACHE_SHIFT);
}
197
198static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
199{
200 struct mm_struct *mm = current->active_mm;
201 struct vm_area_struct *mpnt;
202 pgoff_t pgoff;
203
204
205
206
207
208
209
210 pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
211
212 flush_dcache_mmap_lock(mapping);
213 vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
214 unsigned long offset;
215
216
217
218
219 if (mpnt->vm_mm != mm)
220 continue;
221 if (!(mpnt->vm_flags & VM_MAYSHARE))
222 continue;
223 offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
224 flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
225 }
226 flush_dcache_mmap_unlock(mapping);
227}
228
#if __LINUX_ARM_ARCH__ >= 6
/*
 * Make the D-cache and I-cache coherent for a page about to be mapped
 * by @pteval (called when a PTE is installed).
 */
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct page *page;
	struct address_space *mapping;

	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	/* only aliasing caches need the mapping for alias flushing */
	if (cache_is_vipt_aliasing())
		mapping = page_mapping(page);
	else
		mapping = NULL;

	/* flush the D-cache only if the page was not already marked clean */
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (pte_exec(pteval))
		/* executable mapping: make the I-cache see the new data */
		__flush_icache_all();
}
#endif
256
/*
 * Ensure cache coherency between the kernel mapping and any userspace
 * mapping of @page.
 *
 * If the page belongs to a mapping with no user-space mappings yet
 * (and cache ops need no SMP broadcast), we can be lazy: clear
 * PG_dcache_clean and defer the flush until __sync_icache_dcache()
 * sees the page mapped.  Otherwise flush now: the kernel lines, the
 * user aliases (VIVT), and/or the whole I-cache, then mark the page
 * clean.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

	if (!cache_ops_need_broadcast() &&
	    mapping && !mapping_mapped(mapping))
		/* defer: flushed later when the page gets mapped */
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
302
/*
 * Ensure cache coherency for the kernel mapping of @page after the
 * kernel has modified it through that mapping.
 *
 * Only VIVT and aliasing-VIPT caches need any work here, and only
 * when the page is anonymous or its mapping is actually mapped into
 * user space.
 */
void flush_kernel_dcache_page(struct page *page)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
		struct address_space *mapping;

		mapping = page_mapping(page);

		if (!mapping || mapping_mapped(mapping)) {
			void *addr;

			addr = page_address(page);
			/*
			 * page_address() returns NULL for a highmem page
			 * with no permanent kernel mapping; skip the flush
			 * then — there is no kernel mapping to be dirty.
			 * (With !CONFIG_HIGHMEM addr is always valid.)
			 */
			if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
		}
	}
}
EXPORT_SYMBOL(flush_kernel_dcache_page);
335
/*
 * Flush an anonymous page so that users of get_user_pages() can safely
 * access its data through the kernel mapping at @vmaddr's frame.
 *
 * NOTE(review): callers presumably follow the usual sequence —
 * get_user_pages() -> flush_anon_page, memcpy to/from the page, then
 * flush_dcache_page() if written — confirm against call sites.
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we only need to flush one alias at
		 * the colour of the userspace address.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate the kernel mapping of the page as well.  Note this
	 * flushes via page_address(page) — NOTE(review): for a highmem
	 * page without a permanent mapping this would be NULL; presumably
	 * callers only pass lowmem/kmapped pages — confirm.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
375