/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>

#include "mm.h"

#ifdef CONFIG_ARM_HEAVY_MB
void (*soc_mb)(void);

void arm_heavy_mb(void)
{
#ifdef CONFIG_OUTER_CACHE_SYNC
	if (outer_cache.sync)
		outer_cache.sync();
#endif
	if (soc_mb)
		soc_mb();
}
EXPORT_SYMBOL(arm_heavy_mb);
#endif

#ifdef CONFIG_CPU_CACHE_VIPT

/*
 * Flush the data cache for @pfn through a temporary kernel alias that
 * shares its cache colour with the user address @vaddr.
 */
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

	/*
	 * Clean and invalidate the D-cache over the alias, then drain
	 * the write buffer.
	 */
	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
	    : "cc");
}

/*
 * Make the instruction cache coherent for @len bytes of @pfn, again
 * through a kernel alias of the same cache colour as the user address.
 */
static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	unsigned long offset = vaddr & (PAGE_SIZE - 1);
	unsigned long to;

	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
	to = va + offset;
	flush_icache_range(to, to + len);
}

void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		/*
		 * Clean and invalidate the entire D-cache, then drain
		 * the write buffer.
		 */
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		/*
		 * Clean and invalidate the entire D-cache, then drain
		 * the write buffer.
		 */
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
#endif

#define FLAG_PA_IS_EXEC		1
#define FLAG_PA_CORE_IN_MM	2

static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}

static inline
void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
			   unsigned long len, unsigned int flags)
{
	if (cache_is_vivt()) {
		if (flags & FLAG_PA_CORE_IN_MM) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache: only the I-cache needs attention. */
	if (flags & FLAG_PA_IS_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}

static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	unsigned int flags = 0;

	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		flags |= FLAG_PA_CORE_IN_MM;
	if (vma->vm_flags & VM_EXEC)
		flags |= FLAG_PA_IS_EXEC;
	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len)
{
	unsigned int flags = FLAG_PA_CORE_IN_MM | FLAG_PA_IS_EXEC;

	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!PageHighMem(page)) {
		size_t page_size = PAGE_SIZE << compound_order(page);
		__cpuc_flush_dcache_area(page_address(page), page_size);
	} else {
		unsigned long i;
		if (cache_is_vipt_nonaliasing()) {
			for (i = 0; i < (1 << compound_order(page)); i++) {
				void *addr = kmap_atomic(page + i);
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
				kunmap_atomic(addr);
			}
		} else {
			for (i = 0; i < (1 << compound_order(page)); i++) {
				void *addr = kmap_high_get(page + i);
				if (addr) {
					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
					kunmap_high(page + i);
				}
			}
		}
	}

	/*
	 * If this is a page cache page and we have an aliasing VIPT cache,
	 * also flush the page through a kernel alias of the same cache
	 * colour as its userspace file mappings (the colour is derived
	 * from the file offset).
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_SHIFT);
}

static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

#if __LINUX_ARM_ARCH__ >= 6
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct page *page;
	struct address_space *mapping;

	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (cache_is_vipt_aliasing())
		mapping = page_mapping_file(page);
	else
		mapping = NULL;

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (pte_exec(pteval))
		__flush_icache_all();
}
#endif

/*
 * Ensure cache coherency between the kernel mapping and the userspace
 * mapping of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent, so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *          current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcast.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
		if (test_bit(PG_dcache_clean, &page->flags))
			clear_bit(PG_dcache_clean, &page->flags);
		return;
	}

	mapping = page_mapping_file(page);

	if (!cache_ops_need_broadcast() &&
	    mapping && !page_mapcount(page))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Ensure cache coherency for the kernel mapping of this page.  We can
 * assume that the page is pinned via kmap.
 *
 * If the page only exists in the page cache and there are no user
 * space mappings, this is a no-op since the page was already marked
 * dirty at creation.  Otherwise write it back.
 */
void flush_kernel_dcache_page(struct page *page)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
		struct address_space *mapping;

		mapping = page_mapping_file(page);

		if (!mapping || mapping_mapped(mapping)) {
			void *addr;

			addr = page_address(page);
			/*
			 * kmap_atomic() doesn't set the page virtual
			 * address, and kunmap_atomic() takes care of
			 * cache flushing already.
			 */
			if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
		}
	}
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_kernel_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate the userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate the kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}