1
2
3
4
5
6
7
8
9
10#ifndef _ASMARM_CACHEFLUSH_H
11#define _ASMARM_CACHEFLUSH_H
12
13#include <linux/mm.h>
14
15#include <asm/glue-cache.h>
16#include <asm/shmparam.h>
17#include <asm/cachetype.h>
18#include <asm/outercache.h>
19
/*
 * Shared-memory cache colour of a virtual address: the index of the
 * page within an SHMLBA-aligned window.  Addresses with the same
 * colour alias to the same cache lines on VIVT/aliasing-VIPT caches.
 * The argument is fully parenthesised so that expression arguments
 * (e.g. CACHE_COLOUR(a | b)) expand with the intended precedence.
 */
#define CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)
21
22
23
24
25
/*
 * Per-page flag (aliased onto the arch-private page flag PG_arch_1)
 * recording that the page's D-cache state is clean, so no further
 * cache maintenance is needed before handing the page to user space.
 */
#define PG_dcache_clean PG_arch_1
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
/*
 * Table of per-CPU-type cache maintenance operations.  When the kernel
 * is built with support for more than one cache architecture
 * (MULTI_CACHE), the __cpuc_* and dmac_* entry points below dispatch
 * through an instance of this structure; otherwise they bind directly
 * to the single compiled-in implementation.
 */
struct cpu_cache_fns {
	void (*flush_icache_all)(void);		/* invalidate the entire I-cache */
	void (*flush_kern_all)(void);		/* flush the entire cache hierarchy */
	void (*flush_kern_louis)(void);		/* flush up to Level Of Unification Inner Shareable */
	void (*flush_user_all)(void);		/* flush all user-space mappings */
	/* flush a user virtual address range: (start, end, vm_flags) */
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	/* make a kernel virtual range coherent between I- and D-cache */
	void (*coherent_kern_range)(unsigned long, unsigned long);
	/* as above for a user range; returns an error code (int) */
	int (*coherent_user_range)(unsigned long, unsigned long);
	/* flush a kernel D-cache area given (addr, size) */
	void (*flush_kern_dcache_area)(void *, size_t);

	/* DMA streaming-mapping helpers: (addr, size, direction) */
	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	/* clean+invalidate an area for DMA: (start, end) pointers */
	void (*dma_flush_range)(const void *, const void *);
};
120
121
122
123
/*
 * Select the calling method: dispatch through the cpu_cache
 * function-pointer table when multiple cache types are supported,
 * or bind directly to the single implementation otherwise.
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_kern_louis		cpu_cache.flush_kern_louis
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area			cpu_cache.dma_map_area
#define dmac_unmap_area			cpu_cache.dma_unmap_area
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_kern_louis(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern int __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);

#endif
169
170
171
172
173
174
/*
 * Copy user data to/from a page which is mapped into a different
 * process's address space.  copy_to_user_page() needs real cache
 * maintenance (out of line); copy_from_user_page() is a plain memcpy
 * since reading does not create I/D-cache incoherency.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
181
182
183
184
185
186
/* Invalidate the whole I-cache (CP15 c7, c5, 0: ICIALLU). */
#define __flush_icache_all_generic() \
	asm("mcr p15, 0, %0, c7, c5, 0" \
	    : : "r" (0));

/* Invalidate I-cache inner shareable (CP15 c7, c1, 0) for v7 SMP. */
#define __flush_icache_all_v7_smp() \
	asm("mcr p15, 0, %0, c7, c1, 0" \
	    : : "r" (0));
195
196
197
198
199
/*
 * Choose the preferred I-cache invalidation method for the build:
 * mixed v6/v7 (or SMP-on-UP) kernels must decide at runtime via the
 * __cpuc_ entry point; pure v7 SMP can use the inner-shareable op
 * directly; v6 with erratum 411920 needs its workaround implementation;
 * everything else falls through to the generic invalidate.
 */
#if (defined(CONFIG_CPU_V7) && \
     (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \
	defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define __flush_icache_preferred	__flush_icache_all_v7_smp
#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#else
#define __flush_icache_preferred	__flush_icache_all_generic
#endif
211
/* Invalidate the entire I-cache using the method selected above. */
static inline void __flush_icache_all(void)
{
	__flush_icache_preferred();
}
216
217
218
219
/* Flush caches up to the Level Of Unification Inner Shareable. */
#define flush_cache_louis()		__cpuc_flush_kern_louis()

/* Flush the entire cache hierarchy. */
#define flush_cache_all()		__cpuc_flush_kern_all()
223
/*
 * VIVT variant of flush_cache_mm(): flush all user mappings, but only
 * when the current CPU has actually run this mm (otherwise its VIVT
 * cache cannot hold any of the mm's lines).
 */
static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		return;

	__cpuc_flush_user_all();
}
229
230static inline void
231vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
232{
233 struct mm_struct *mm = vma->vm_mm;
234
235 if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
236 __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
237 vma->vm_flags);
238}
239
240static inline void
241vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
242{
243 struct mm_struct *mm = vma->vm_mm;
244
245 if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
246 unsigned long addr = user_addr & PAGE_MASK;
247 __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
248 }
249}
250
/*
 * Non-VIPT caches can use the inline VIVT helpers above directly;
 * VIPT caches need the out-of-line implementations.
 */
#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
		vivt_flush_cache_page(vma,addr,pfn)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
#endif

/* fork(): a duplicated mm needs the same treatment as the original. */
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
265
266
267
268
269
270
/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * The range is widened to whole pages before being passed down.
 */
#define flush_cache_user_range(start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))

/*
 * Perform necessary cache operations to ensure that kernel I-cache and
 * D-cache are in sync for an address range modified by the kernel.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)

/* Clean (write back) a kernel D-cache area of the given size. */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)
285
286
287
288
289
290
291
292
293
294
295
296
297
/*
 * ARM provides its own flush_dcache_page() implementation (out of
 * line, in mm/flush.c) rather than the generic no-op, because
 * aliasing caches may hold stale data for page-cache pages written
 * by the kernel.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
300
301static inline void flush_kernel_vmap_range(void *addr, int size)
302{
303 if ((cache_is_vivt() || cache_is_vipt_aliasing()))
304 __cpuc_flush_dcache_area(addr, (size_t)size);
305}
306static inline void invalidate_kernel_vmap_range(void *addr, int size)
307{
308 if ((cache_is_vivt() || cache_is_vipt_aliasing()))
309 __cpuc_flush_dcache_area(addr, (size_t)size);
310}
311
312#define ARCH_HAS_FLUSH_ANON_PAGE
/*
 * Flush an anonymous page that may be mapped into user space; pages
 * that are not anonymous are left untouched.  The real work is done
 * out of line by __flush_anon_page().
 */
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);

	if (!PageAnon(page))
		return;

	__flush_anon_page(vma, page, vmaddr);
}
321
/* ARM provides a real flush_kernel_dcache_page() (out of line). */
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
extern void flush_kernel_dcache_page(struct page *);

/*
 * Serialise walks of a mapping's page tree against D-cache flushing
 * by taking the mapping's tree_lock with interrupts disabled.
 */
#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

/*
 * After writing instructions into a user page, flushing the D-cache
 * page is sufficient here; I-cache maintenance is handled elsewhere.
 */
#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)
332
333
334
335
336
/*
 * Intentional no-op: I-cache/D-cache coherency for newly mapped pages
 * is handled by the other maintenance hooks in this file, not here.
 */
#define flush_icache_page(vma,page)	do { } while (0)
338
339
340
341
342
343
344
345
/*
 * Called before a new vmalloc/vmap mapping is used.  Aliasing caches
 * must be flushed entirely; non-aliasing VIPT caches only need a
 * barrier to order the preceding page-table updates.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (cache_is_vipt_nonaliasing()) {
		/*
		 * Ensure the page-table writes establishing the mapping
		 * have completed before the mapping is used.
		 */
		dsb();
	} else {
		flush_cache_all();
	}
}
357
/*
 * Called before a vmalloc/vmap mapping is torn down.  Only aliasing
 * caches can hold lines under the soon-to-be-stale virtual addresses,
 * so non-aliasing VIPT caches need nothing here.
 */
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (cache_is_vipt_nonaliasing())
		return;

	flush_cache_all();
}
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
/*
 * Upper bound on the write-back granule (largest cache line that may
 * be written back as a unit): 2^6 = 64 bytes.
 * NOTE(review): the order appears to be a conservative constant rather
 * than probed from the hardware — confirm against supported CPUs.
 */
#define __CACHE_WRITEBACK_ORDER 6
#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)

/*
 * There is no dedicated __cpuc_clean_dcache_area entry point; alias it
 * to __cpuc_flush_dcache_area (a flush also cleans) so callers can
 * state their intent.
 */
#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area
396
397
398
399
400
/*
 * Make preceding writes to *p by this CPU visible to other observers:
 * clean the inner cache by virtual address, then clean the outer
 * cache by physical address.  The inner clean must come first so the
 * data reaches the outer cache before that is cleaned.
 */
static inline void __sync_cache_range_w(volatile void *p, size_t size)
{
	char *_p = (char *)p;

	__cpuc_clean_dcache_area(_p, size);
	outer_clean_range(__pa(_p), __pa(_p + size));
}
408
409
410
411
412
413
414
/*
 * Make writes to *p by other observers visible to subsequent reads by
 * this CPU.  Flush (clean+invalidate) rather than plain invalidate is
 * used throughout so that dirty data this CPU may also hold for the
 * range is not discarded.
 */
static inline void __sync_cache_range_r(volatile void *p, size_t size)
{
	char *_p = (char *)p;

#ifdef CONFIG_OUTER_CACHE
	if (outer_cache.flush_range) {
		/*
		 * Ensure dirty data in the inner cache is written out
		 * before the outer cache is flushed, so it is not lost
		 * when the outer lines are invalidated.
		 */
		__cpuc_clean_dcache_area(_p, size);

		/* Clean and invalidate the outer cache for the range. */
		outer_flush_range(__pa(_p), __pa(_p + size));
	}
#endif

	/* Finally clean and invalidate the inner cache for the range. */
	__cpuc_flush_dcache_area(_p, size);
}
435
/*
 * Convenience wrappers: synchronise a single object for writing (w) or
 * reading (r), deriving the size from the pointed-to type.
 */
#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
438
439#endif
440