/*
 *  arch/arm/include/asm/cacheflush.h
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>

#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
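/*
 * CACHE_COLOUR() above yields the page index of a virtual address within
 * an SHMLBA-sized window, i.e. the "colour" that decides which cache sets
 * the page occupies on an aliasing VIPT cache.
 */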

/*
 *	Cache Model
 *	===========
 */
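/*
 * Each cache type selected below either defines _CACHE to the name of its
 * low-level implementation (single-implementation kernels, where the
 * __cpuc_* macros glue directly to those routines) or defines MULTI_CACHE,
 * in which case calls go through the cpu_cache function pointer table
 * declared further down.
 */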
#undef _CACHE
#undef MULTI_CACHE

#if defined(CONFIG_CPU_CACHE_V3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v3
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4
# endif
#endif

#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
    defined(CONFIG_CPU_ARM1026)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_FA526)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE fa
# endif
#endif

#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm926
# endif
#endif

#if defined(CONFIG_CPU_ARM940T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm940
# endif
#endif

#if defined(CONFIG_CPU_ARM946E)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm946
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4WB)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4wb
# endif
#endif

#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xscale
# endif
#endif

#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xsc3
# endif
#endif

#if defined(CONFIG_CPU_MOHAWK)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE mohawk
# endif
#endif

#if defined(CONFIG_CPU_FEROCEON)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_V6)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_V7)
# define MULTI_CACHE 1
#endif

#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1

/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementation of these, and the required
 *	effects are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_icache_all()
 *
 *		Unconditionally clean and invalidate the entire icache.
 *		Currently only needed for cache-v6.S and cache-v7.S, see
 *		__flush_icache_all for the generic implementation.
 *
 *	flush_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_user_all()
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	coherent_user_range(start, end)
 *
 *		As above, but for a user space address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	flush_kern_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in the kernel mapping is written
 *		back to memory.
 *		- kaddr  - kernel virtual address
 *		- size   - region size
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 */
struct cpu_cache_fns {
	void (*flush_icache_all)(void);
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);

	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	void (*dma_flush_range)(const void *, const void *);
};

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;
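/*
 * With MULTI_CACHE, cpu_cache is filled in during boot from the selected
 * processor's information table (see the MULTI_CACHE handling in
 * arch/arm/kernel/setup.c), so every call below costs one indirection.
 */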

#define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area			cpu_cache.dma_map_area
#define dmac_unmap_area			cpu_cache.dma_unmap_area
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

#define __cpuc_flush_icache_all		__glue(_CACHE,_flush_icache_all)
#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area			__glue(_CACHE,_dma_map_area)
#define dmac_unmap_area			__glue(_CACHE,_dma_unmap_area)
#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)

extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);

#endif
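/*
 * Example: on a kernel built for a single cache model, say with _CACHE
 * defined as arm926 above, __cpuc_flush_kern_all expands via __glue() to
 * arm926_flush_kern_cache_all, and the extern declarations here resolve
 * directly to that implementation.
 */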

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
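/*
 * copy_to_user_page() needs a real implementation because it writes into
 * another task's pages (e.g. ptrace inserting breakpoints), so the D-cache
 * and I-cache must be made coherent for the target mapping.
 * copy_from_user_page() only reads, hence the plain memcpy() above.
 */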

/*
 * Convert calls to our calling convention.
 */

/* Invalidate I-cache */
#define __flush_icache_all_generic()				\
	asm("mcr	p15, 0, %0, c7, c5, 0"			\
	    : : "r" (0));

/* Invalidate I-cache inner shareable */
#define __flush_icache_all_v7_smp()				\
	asm("mcr	p15, 0, %0, c7, c1, 0"			\
	    : : "r" (0));
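/*
 * The two CP15 writes above are the ICIALLU and ICIALLUIS operations:
 * invalidate the entire I-cache to the point of unification, the latter
 * broadcast to the Inner Shareable domain on ARMv7 SMP systems.
 */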

/*
 * Select the most suitable __flush_icache_all() implementation for the
 * kernel configuration.  Kernels that must cope with more than one CPU
 * architecture (or SMP-on-UP) go through the per-CPU function pointer;
 * pure ARMv7 SMP kernels can use the inner-shareable variant directly.
 */
#if (defined(CONFIG_CPU_V7) && defined(CONFIG_CPU_V6)) || \
	defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define __flush_icache_preferred	__flush_icache_all_v7_smp
#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#else
#define __flush_icache_preferred	__flush_icache_all_generic
#endif

static inline void __flush_icache_all(void)
{
	__flush_icache_preferred();
}

#define flush_cache_all()		__cpuc_flush_kern_all()
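
/*
 * VIVT cache helpers: a virtually indexed, virtually tagged cache can only
 * hold lines for an address space that has run on this CPU, so the helpers
 * below skip the flush when the mm is not in this CPU's mm_cpumask.
 */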
static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}

static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
		vivt_flush_cache_page(vma,addr,pfn)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(vma,start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))

/*
 * Perform necessary cache operations to ensure that kernel memory
 * will execute correctly.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)
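/*
 * Typical use: after the kernel writes instructions into memory (module
 * loading, code patching), flush_icache_range() is called over the
 * modified range so the new code is visible to instruction fetches.
 */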

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

static inline void flush_kernel_vmap_range(void *addr, int size)
{
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
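/*
 * Both helpers above only have work to do on aliasing caches (VIVT or
 * aliasing VIPT), where the vmap alias and the linear-map alias of a page
 * can occupy different cache lines; on non-aliasing caches the two
 * mappings hit the same lines and no extra maintenance is needed.
 */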

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel virtual space.  Since the
 * direct-mappings of these pages may contain cached data, we need
 * to do a full cache flush to ensure that writebacks don't corrupt
 * data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
		 */
		dsb();
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
}

#endif