1
2
3
4
5
6
7
8
9
10#ifndef _ASMARM_CACHEFLUSH_H
11#define _ASMARM_CACHEFLUSH_H
12
13#include <linux/mm.h>
14
15#include <asm/glue-cache.h>
16#include <asm/shmparam.h>
17#include <asm/cachetype.h>
18#include <asm/outercache.h>
19
/*
 * CACHE_COLOUR() - cache colour of a virtual address: the page index of
 * the address within an SHMLBA-sized aliasing window.  Mappings with the
 * same colour may alias in VIVT/aliasing-VIPT caches.
 *
 * The argument is fully parenthesised so that expressions with operators
 * of lower precedence than '&' (e.g. CACHE_COLOUR(base | off)) expand
 * correctly; the previous form dropped the high bits only from the
 * right-hand operand of such expressions.
 */
#define CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)

/*
 * PG_dcache_clean: per-page flag recording that the page's kernel-side
 * D-cache state is clean, so no flush is needed before handing the page
 * out to user space.
 */
#define PG_dcache_clean PG_arch_1
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
/*
 * Per-CPU-type cache maintenance operations.  When MULTI_CACHE is
 * enabled the __cpuc_* / dmac_* entry points dispatch through an
 * instance of this table (see below); layout randomisation is disabled
 * because assembly and boot code rely on the member offsets.
 */
struct cpu_cache_fns {
	void (*flush_icache_all)(void);		/* invalidate entire I-cache */
	void (*flush_kern_all)(void);		/* flush entire cache hierarchy */
	void (*flush_kern_louis)(void);		/* flush to Level of Unification Inner Shareable */
	void (*flush_user_all)(void);		/* flush all user-space cache state */
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);	/* start, end, vm_flags */

	void (*coherent_kern_range)(unsigned long, unsigned long);	/* make I/D coherent, kernel range */
	int (*coherent_user_range)(unsigned long, unsigned long);	/* as above for user range; returns status */
	void (*flush_kern_dcache_area)(void *, size_t);			/* flush D-cache for kernel area */

	/* DMA streaming-mapping maintenance: addr, size, direction */
	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	void (*dma_flush_range)(const void *, const void *);	/* flush [start, end) for DMA */
} __no_randomize_layout;
120
121
122
123
#ifdef MULTI_CACHE
/*
 * Several incompatible cache types are compiled in: route every cache
 * operation through the function table selected at boot for this CPU.
 */
extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_kern_louis		cpu_cache.flush_kern_louis
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/*
 * NOTE(review): the dma_map_area/dma_unmap_area table slots are
 * presumably consumed by the DMA mapping code directly; only the
 * flush-range operation is exported by name from this header.
 */
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

/*
 * Single cache type: the operations resolve to direct calls into the
 * one compiled-in implementation.
 */
extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_kern_louis(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern int __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

extern void dmac_flush_range(const void *, const void *);

#endif
165
166
167
168
169
170
/*
 * copy_to_user_page() writes data (possibly code) into a user page and
 * must therefore keep cache aliases and the I-cache coherent, so it is
 * implemented out of line.  The read direction needs no cache
 * maintenance and is a plain memcpy.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
177
178
179
180
181
182
/*
 * Invalidate the entire I-cache on this CPU (ICIALLU: CP15 c7, c5, 0).
 */
#define __flush_icache_all_generic()				\
	asm("mcr	p15, 0, %0, c7, c5, 0"			\
	    : : "r" (0));

/*
 * Invalidate the entire I-cache Inner Shareable (ICIALLUIS: CP15 c7,
 * c1, 0) so the invalidation is broadcast to all CPUs in the inner
 * shareable domain.
 */
#define __flush_icache_all_v7_smp()				\
	asm("mcr	p15, 0, %0, c7, c1, 0"			\
	    : : "r" (0));

/*
 * Select the preferred I-cache flush for this kernel configuration:
 *  - kernels supporting mixed CPU architectures (or SMP_ON_UP) must go
 *    through the runtime-selected __cpuc_flush_icache_all;
 *  - pure v7 SMP can use the inner-shareable broadcast directly;
 *  - v6 with erratum 411920 needs the out-of-line workaround;
 *  - otherwise the generic local invalidate suffices.
 */
#if (defined(CONFIG_CPU_V7) && \
     (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \
	defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define __flush_icache_preferred	__flush_icache_all_v7_smp
#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#else
#define __flush_icache_preferred	__flush_icache_all_generic
#endif
207
/*
 * Invalidate the whole I-cache using the method selected above, then
 * issue a barrier so the invalidation is complete before any code that
 * depends on it executes.
 */
static inline void __flush_icache_all(void)
{
	__flush_icache_preferred();
	dsb(ishst);
}
213
214
215
216
/* Flush the caches to the Level of Unification Inner Shareable only. */
#define flush_cache_louis()		__cpuc_flush_kern_louis()

/* Flush the entire cache hierarchy visible to this CPU. */
#define flush_cache_all()		__cpuc_flush_kern_all()
220
/*
 * Flush all user cache state for @mm on a VIVT cache.  If this CPU has
 * never run the mm (not set in mm_cpumask) its cache cannot hold any of
 * the mm's lines, so nothing needs to be done.
 */
static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		return;

	__cpuc_flush_user_all();
}
226
227static inline void
228vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
229{
230 struct mm_struct *mm = vma->vm_mm;
231
232 if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
233 __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
234 vma->vm_flags);
235}
236
237static inline void
238vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
239{
240 struct mm_struct *mm = vma->vm_mm;
241
242 if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
243 unsigned long addr = user_addr & PAGE_MASK;
244 __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
245 }
246}
247
#ifndef CONFIG_CPU_CACHE_VIPT
/* Pure VIVT caches: use the vivt_* inline helpers above directly. */
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
		vivt_flush_cache_page(vma,addr,pfn)
#else
/*
 * VIPT (potentially aliasing) caches need the out-of-line
 * implementations provided elsewhere in the arch code.
 */
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
#endif

/* fork(): the duplicated mm needs the same cache treatment as the parent. */
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
262
263
264
265
266
267
/*
 * Make the I/D caches coherent for a user address range (e.g. after
 * user code has been written).  Returns int: the underlying
 * __cpuc_coherent_user_range() can report a status.
 */
#define flush_cache_user_range(s,e)	__cpuc_coherent_user_range(s,e)

/*
 * Make the I/D caches coherent for a kernel address range —
 * NOTE(review): presumably used after the kernel writes code
 * (modules, kprobes); confirm against callers.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)

/* Clean (write back) a kernel D-cache area via the per-CPU operation. */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page() keeps the kernel mapping of a page-cache page
 * coherent with any user aliases; implemented out of line.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
296
297static inline void flush_kernel_vmap_range(void *addr, int size)
298{
299 if ((cache_is_vivt() || cache_is_vipt_aliasing()))
300 __cpuc_flush_dcache_area(addr, (size_t)size);
301}
302static inline void invalidate_kernel_vmap_range(void *addr, int size)
303{
304 if ((cache_is_vivt() || cache_is_vipt_aliasing()))
305 __cpuc_flush_dcache_area(addr, (size_t)size);
306}
307
308#define ARCH_HAS_FLUSH_ANON_PAGE
/*
 * Flush a page's user alias before the kernel reads it through its own
 * mapping.  Only anonymous pages need the __flush_anon_page() handling;
 * everything else is a no-op here.
 */
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
			struct page *, unsigned long);

	if (!PageAnon(page))
		return;

	__flush_anon_page(vma, page, vmaddr);
}
317
/* Flush the kernel mapping of a page; implemented out of line. */
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
extern void flush_kernel_dcache_page(struct page *);

/* Serialise D-cache flushing against updates of the mapping's i_pages. */
#define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)

/*
 * After writing into a user page that may be executed, flushing the
 * D-cache side of the page is what this hook does here —
 * NOTE(review): I-cache maintenance appears to be handled on another
 * path; confirm against copy_to_user_page()/flush.c.
 */
#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/* No per-page I-cache maintenance is required on this hook. */
#define flush_icache_page(vma,page)	do { } while (0)
332
333
334
335
336
337
338
339
/*
 * Called when a new kernel virtual mapping is created (vmap/vmalloc).
 * Aliasing caches may hold stale data for the newly mapped physical
 * pages, so flush everything; non-aliasing VIPT caches only need a
 * barrier to publish the new PTEs.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line
		 */
		dsb(ishst);
}
351
/*
 * Called before a kernel virtual mapping is torn down.  Non-aliasing
 * VIPT caches need no maintenance; any other cache type may hold
 * entries for the dying alias and is flushed entirely.
 */
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (cache_is_vipt_nonaliasing())
		return;

	flush_cache_all();
}
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
/*
 * Cache-line sizing for state shared between cached and non-cached
 * observers: align each such variable to its own writeback granule.
 * NOTE(review): the order must be >= the largest cache writeback size
 * of any platform in the kernel configuration; 6 (64 bytes) looks like
 * a conservative choice — confirm against supported platforms.
 */
#define __CACHE_WRITEBACK_ORDER	6
#define __CACHE_WRITEBACK_GRANULE	(1 << __CACHE_WRITEBACK_ORDER)

/*
 * No dedicated clean-only per-CPU entry point is exported above, so
 * reuse the flush operation (which performs at least a clean) where a
 * clean is needed.
 */
#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area
390
391
392
393
394
/*
 * Push cached writes in [p, p+size) out to main memory so a non-cached
 * observer sees them: clean the inner (CPU) D-cache first, then clean
 * the outer cache.  The inner-before-outer order is required so data
 * evicted from L1 reaches the outer cache before it is cleaned.
 */
static inline void __sync_cache_range_w(volatile void *p, size_t size)
{
	char *_p = (char *)p;

	__cpuc_clean_dcache_area(_p, size);
	outer_clean_range(__pa(_p), __pa(_p + size));
}
402
403
404
405
406
407
408
/*
 * Make [p, p+size) safe to read through the cache after it may have
 * been written by a non-cached observer: flush (clean+invalidate) the
 * outer cache if present, then the inner cache.
 */
static inline void __sync_cache_range_r(volatile void *p, size_t size)
{
	char *_p = (char *)p;

#ifdef CONFIG_OUTER_CACHE
	if (outer_cache.flush_range) {
		/*
		 * Clean the inner cache first so that dirty lines
		 * cannot later be evicted on top of the freshly
		 * flushed outer cache range.
		 */
		__cpuc_clean_dcache_area(_p, size);

		/* Flush (clean+invalidate) the outer cache range. */
		outer_flush_range(__pa(_p), __pa(_p + size));
	}
#endif

	/* ... then flush the inner cache so stale lines are dropped. */
	__cpuc_flush_dcache_area(_p, size);
}
429
/* Write-sync one complete object; ptr must address the whole object. */
#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
/* Read-sync one complete object. */
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
/*
 * v7_exit_coherency_flush(level) - take an ARMv7 CPU out of cache
 * coherency, for a CPU that is about to leave the system:
 *
 *  1. clear SCTLR.C to disable D-cache allocation,
 *  2. call v7_flush_dcache_<level> ("louis" or "all") to flush the
 *     D-cache to the requested level,
 *  3. clear ACTLR bit 6 to disable local coherency,
 * with ISB/DSB barriers ordering each step.
 *
 * Done as a single asm block so no compiler-generated memory accesses
 * occur between disabling and flushing the cache; fp/ip are saved and
 * the clobber list covers the registers the called flush routine may
 * use.  NOTE(review): the flush routine is provided by arch code
 * elsewhere — confirm the clobber set against its implementation.
 */
#define v7_exit_coherency_flush(level) \
	asm volatile( \
	".arch	armv7-a \n\t" \
	"stmfd	sp!, {fp, ip} \n\t" \
	"mrc	p15, 0, r0, c1, c0, 0	@ get SCTLR \n\t" \
	"bic	r0, r0, #"__stringify(CR_C)" \n\t" \
	"mcr	p15, 0, r0, c1, c0, 0	@ set SCTLR \n\t" \
	"isb	\n\t" \
	"bl	v7_flush_dcache_"__stringify(level)" \n\t" \
	"mrc	p15, 0, r0, c1, c0, 1	@ get ACTLR \n\t" \
	"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t" \
	"mcr	p15, 0, r0, c1, c0, 1	@ set ACTLR \n\t" \
	"isb	\n\t" \
	"dsb	\n\t" \
	"ldmfd	sp!, {fp, ip}" \
	: : : "r0","r1","r2","r3","r4","r5","r6","r7", \
	      "r9","r10","lr","memory" )
478
/*
 * Cache maintenance for a uprobes execute-out-of-line (XOL) slot after
 * instructions are copied into it; implemented out of line.
 */
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len);
481
482#endif
483