1
2
3
4
5
6
7
8
9
10#ifndef _ASMARM_CACHEFLUSH_H
11#define _ASMARM_CACHEFLUSH_H
12
13#include <linux/mm.h>
14
15#include <asm/glue-cache.h>
16#include <asm/shmparam.h>
17#include <asm/cachetype.h>
18#include <asm/outercache.h>
19
/*
 * Cache colour of a user virtual address: its page index within an
 * SHMLBA-sized window.  Two mappings with different colours may alias
 * in a virtually-indexed cache.
 */
#define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)

/*
 * Arch-private page flag: set once the D-cache view of the page is
 * known to be clean, so redundant flushes can be skipped.
 */
#define PG_dcache_clean PG_arch_1
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
/*
 * Table of cache maintenance operations for one CPU/cache type.
 * With MULTI_CACHE builds (see below) the kernel dispatches through a
 * runtime-selected instance of this structure; otherwise the ops are
 * bound directly at link time.
 */
struct cpu_cache_fns {
	void (*flush_icache_all)(void);		/* invalidate entire I-cache */
	void (*flush_kern_all)(void);		/* flush entire cache (kernel view) */
	void (*flush_kern_louis)(void);		/* flush to Level of Unification Inner Shareable */
	void (*flush_user_all)(void);		/* flush all user-space mappings */
	/* (start, end, vm_flags) — see vivt_flush_cache_range() below */
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	/* returns nonzero on fault — see __cpuc_coherent_user_range users */
	int (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);

	/* DMA ops: (kernel address, size, DMA direction) */
	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	/* flush (clean+invalidate) an arbitrary kernel address range */
	void (*dma_flush_range)(const void *, const void *);
};
120
121
122
123
/*
 * Select how the __cpuc_* cache operations are resolved: via the
 * runtime-selected cpu_cache function-pointer table when the kernel is
 * built for multiple CPU types (MULTI_CACHE), or as direct references
 * to the single compiled-in implementation otherwise.
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_kern_louis		cpu_cache.flush_kern_louis
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/* flush (clean+invalidate) a kernel virtual address range for DMA */
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_kern_louis(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern int __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

extern void dmac_flush_range(const void *, const void *);

#endif
165
166
167
168
169
170
/*
 * Copy data into a user page and keep the caches coherent with the
 * user's view (e.g. for ptrace writes); implemented out of line.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
/*
 * Reading from a user page needs no cache maintenance here — a plain
 * memcpy suffices.
 */
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
177
178
179
180
181
182
/* ICIALLU: invalidate all instruction caches to PoU (CP15 c7,c5,0). */
#define __flush_icache_all_generic()				\
	asm("mcr	p15, 0, %0, c7, c5, 0"			\
	    : : "r" (0));

/* ICIALLUIS: invalidate all I-caches Inner Shareable (CP15 c7,c1,0). */
#define __flush_icache_all_v7_smp()				\
	asm("mcr	p15, 0, %0, c7, c1, 0"			\
	    : : "r" (0));

/*
 * Choose the I-cache invalidation variant.  When the build mixes v6
 * and v7 (or SMP-on-UP), the choice must be made at runtime, so go
 * through the per-CPU op; v7 SMP can use the inner-shareable op
 * directly; v6 with erratum 411920 needs its workaround implementation
 * (reached via the per-CPU op); otherwise the generic op suffices.
 */
#if (defined(CONFIG_CPU_V7) && \
     (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \
	defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define __flush_icache_preferred	__flush_icache_all_v7_smp
#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#else
#define __flush_icache_preferred	__flush_icache_all_generic
#endif
207
/*
 * Invalidate the entire I-cache using the variant selected above,
 * followed by a barrier so the invalidation completes before any
 * subsequent instruction fetch relies on it.
 */
static inline void __flush_icache_all(void)
{
	__flush_icache_preferred();
	dsb(ishst);
}
213
214
215
216
/* Flush caches up to the Level of Unification Inner Shareable only. */
#define flush_cache_louis()		__cpuc_flush_kern_louis()

/* Flush the entire cache hierarchy visible to this CPU. */
#define flush_cache_all()		__cpuc_flush_kern_all()
220
/*
 * VIVT: flush all user mappings of @mm, but only if this CPU has ever
 * run the mm (otherwise its cache holds nothing to flush).
 */
static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		return;

	__cpuc_flush_user_all();
}
226
227static inline void
228vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
229{
230 struct mm_struct *mm = vma->vm_mm;
231
232 if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
233 __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
234 vma->vm_flags);
235}
236
237static inline void
238vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
239{
240 struct mm_struct *mm = vma->vm_mm;
241
242 if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
243 unsigned long addr = user_addr & PAGE_MASK;
244 __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
245 }
246}
247
/*
 * Without VIPT cache support the VIVT helpers above are used directly;
 * VIPT-capable builds provide out-of-line versions that decide at
 * runtime based on the detected cache type.
 */
#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
		vivt_flush_cache_page(vma,addr,pfn)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
#endif

/* fork(): duplicating an mm needs the same maintenance as flushing it */
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
262
263
264
265
266
267
/*
 * Make a user range coherent between D-cache and I-cache (e.g. after
 * writing code); may fault, hence the int-returning user variant.
 */
#define flush_cache_user_range(s,e)	__cpuc_coherent_user_range(s,e)

/*
 * Make a kernel range coherent between D-cache and I-cache after the
 * kernel writes instructions into it.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)

/* Write back (clean) a kernel D-cache area without invalidating it. */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)
281
282
283
284
285
286
287
288
289
290
291
292
293
/*
 * ARM provides its own flush_dcache_page() (deferred D-cache flushing
 * for page-cache pages, tracked via PG_dcache_clean above).
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
296
297static inline void flush_kernel_vmap_range(void *addr, int size)
298{
299 if ((cache_is_vivt() || cache_is_vipt_aliasing()))
300 __cpuc_flush_dcache_area(addr, (size_t)size);
301}
302static inline void invalidate_kernel_vmap_range(void *addr, int size)
303{
304 if ((cache_is_vivt() || cache_is_vipt_aliasing()))
305 __cpuc_flush_dcache_area(addr, (size_t)size);
306}
307
308#define ARCH_HAS_FLUSH_ANON_PAGE
/*
 * Keep an anonymous page coherent before the kernel accesses it
 * through the kernel mapping; file-backed pages are handled by the
 * flush_dcache_page() machinery and need nothing here.
 */
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);

	if (!PageAnon(page))
		return;

	__flush_anon_page(vma, page, vmaddr);
}
317
/* ARM provides its own kernel-mapping dcache page flush. */
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
extern void flush_kernel_dcache_page(struct page *);

/*
 * Serialise against page-cache radix-tree updates while walking an
 * address_space's mappings for cache maintenance.
 */
#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

/*
 * After writing instructions into a user page, flushing the D-cache
 * page is sufficient here; I-cache maintenance is handled elsewhere.
 * NOTE(review): len/addr are intentionally unused — confirm callers.
 */
#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/* I-cache maintenance on page faults is not needed on ARM. */
#define flush_icache_page(vma,page)	do { } while (0)
334
335
336
337
338
339
340
341
/*
 * Called after a new vmalloc/vmap mapping is created.  Aliasing caches
 * must be fully flushed so the new kernel alias sees current data.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * Non-aliasing caches need no flushing, but a barrier
		 * is still required so the just-written PTEs are
		 * observable before the mapping is used.
		 * NOTE(review): presumably the PTE write path lacks
		 * its own DSB — confirm against set_pte_at()/vmap.
		 */
		dsb(ishst);
}
353
/*
 * Called before a vmalloc/vmap mapping is torn down: aliasing caches
 * must be flushed while the mapping still exists; non-aliasing VIPT
 * caches need nothing.
 */
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (cache_is_vipt_nonaliasing())
		return;

	flush_cache_all();
}
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
/*
 * Upper bound on the writeback granule (64 bytes) used for sizing and
 * aligning buffers that are subject to cache maintenance.
 */
#define __CACHE_WRITEBACK_ORDER 6
#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)

/*
 * There is no dedicated "clean area" op; flush (clean+invalidate) is a
 * superset of clean, so alias it.
 */
#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area
392
393
394
395
396
/*
 * Push CPU writes in [p, p+size) out to memory for a non-coherent
 * observer: clean the inner cache first so dirty lines reach the outer
 * cache, then clean the outer cache by physical address.
 */
static inline void __sync_cache_range_w(volatile void *p, size_t size)
{
	char *_p = (char *)p;

	__cpuc_clean_dcache_area(_p, size);
	outer_clean_range(__pa(_p), __pa(_p + size));
}
404
405
406
407
408
409
410
/*
 * Make [p, p+size) safe to read after a non-coherent agent has written
 * it: flush (clean+invalidate) the range through the cache hierarchy.
 */
static inline void __sync_cache_range_r(volatile void *p, size_t size)
{
	char *_p = (char *)p;

#ifdef CONFIG_OUTER_CACHE
	if (outer_cache.flush_range) {
		/*
		 * Clean the inner cache first: any dirty lines we hold
		 * must reach the outer cache before it is flushed, or
		 * a later inner eviction could overwrite the freshly
		 * flushed outer contents.
		 */
		__cpuc_clean_dcache_area(_p, size);

		/* flush (clean+invalidate) the outer cache range */
		outer_flush_range(__pa(_p), __pa(_p + size));
	}
#endif

	/* finally flush the inner cache so reads refill from memory */
	__cpuc_flush_dcache_area(_p, size);
}
431
/* Object-sized wrappers: sync one *ptr for write-out / for read-back. */
#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
/*
 * Take a v7 CPU out of cache coherency on the power-down path:
 * disable the D-cache (clear SCTLR.C), flush the D-cache to the given
 * @level ("louis" or "all", via v7_flush_dcache_<level>), then clear
 * ACTLR bit 6 to drop out of local coherency, with ISB/DSB to order
 * the sequence.  Clobbers r0-r7, r9, r10 and lr; fp/ip are preserved
 * across the call.  NOTE(review): must not race with other cacheable
 * accesses on this CPU — confirm caller context (irqs off).
 */
#define v7_exit_coherency_flush(level) \
	asm volatile( \
	".arch	armv7-a \n\t" \
	"stmfd	sp!, {fp, ip} \n\t" \
	"mrc	p15, 0, r0, c1, c0, 0	@ get SCTLR \n\t" \
	"bic	r0, r0, #"__stringify(CR_C)" \n\t" \
	"mcr	p15, 0, r0, c1, c0, 0	@ set SCTLR \n\t" \
	"isb	\n\t" \
	"bl	v7_flush_dcache_"__stringify(level)" \n\t" \
	"mrc	p15, 0, r0, c1, c0, 1	@ get ACTLR \n\t" \
	"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t" \
	"mcr	p15, 0, r0, c1, c0, 1	@ set ACTLR \n\t" \
	"isb	\n\t" \
	"dsb	\n\t" \
	"ldmfd	sp!, {fp, ip}" \
	: : : "r0","r1","r2","r3","r4","r5","r6","r7", \
	      "r9","r10","lr","memory" )
480
/*
 * Change protections on @numpages pages starting at kernel address
 * @addr (read-only/read-write, executable/non-executable).  Without an
 * MMU there are no page permissions, so the stubs succeed as no-ops.
 */
#ifdef CONFIG_MMU
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
#else
static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
#endif
492
/*
 * DEBUG_RODATA support: write-protect rodata at boot and toggle kernel
 * text permissions (e.g. for code patching).  No-ops when disabled.
 */
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
#else
static inline void set_kernel_text_rw(void) { }
static inline void set_kernel_text_ro(void) { }
#endif
501
/*
 * Cache maintenance for a uprobes XOL (execute-out-of-line) slot after
 * instructions are written to it via @kaddr on behalf of @uaddr.
 */
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len);
504
505
506
507
508
509
510
511
512
513
514static inline void secure_flush_area(const void *addr, size_t size)
515{
516 phys_addr_t phys = __pa(addr);
517
518 __cpuc_flush_dcache_area((void *)addr, size);
519 outer_flush_range(phys, phys + size);
520}
521
522#endif
523