1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#ifndef _ASM_CACHEFLUSH_H
19#define _ASM_CACHEFLUSH_H
20
21#include <linux/mm.h>
22#include <asm/shmparam.h>
23
24
25
26
27
28
29
/*
 * No-op here: I-cache maintenance is done via the explicit helpers below
 * (__sync_icache_dcache / __inv_icache_page) rather than per-page hooks.
 */
#define flush_icache_page(vma, page)
31
/* Flush (writeback + invalidate) the entire D$ and I$ */
void flush_cache_all(void);

/* Sync I$ with D$ for a kernel virtual range (e.g. after code patching) */
void flush_icache_range(unsigned long kstart, unsigned long kend);
/* Writeback D$ and invalidate I$ for @len bytes at (paddr, vaddr) pair */
void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len);
/* Invalidate the I$ lines for one page given its phys/virt addresses */
void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr);
/* Writeback + invalidate the D$ lines for one page */
void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);

/* Tell core MM that this arch provides its own flush_dcache_page() */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1

void flush_dcache_page(struct page *page);

/* D$ maintenance for streaming DMA: writeback+invalidate / inv / writeback */
void dma_cache_wback_inv(unsigned long start, unsigned long sz);
void dma_cache_inv(unsigned long start, unsigned long sz);
void dma_cache_wback(unsigned long start, unsigned long sz);
46
/* No mapping-level locking needed around dcache flushes on this arch */
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)

/* No targeted range op available here: vmap/vunmap flush the whole cache */
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()

/* Nothing to do when an mm is duplicated (fork) */
#define flush_cache_dup_mm(mm)
55
#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING

/*
 * Non-aliasing cache: a physical page has a single cache footprint, so the
 * per-mm / per-range / per-page flush hooks can be no-ops.
 */
#define flush_cache_mm(mm)
#define flush_cache_range(mm, u_vstart, u_vend)
#define flush_cache_page(vma, u_vaddr, pfn)

#else	/* CONFIG_ARC_CACHE_VIPT_ALIASING */

/*
 * Aliasing VIPT cache: the same physical page can live at several cache
 * colors, so these hooks must do real work (implemented in the cache code).
 */
void flush_cache_mm(struct mm_struct *mm);
void flush_cache_range(struct vm_area_struct *vma,
	unsigned long start,unsigned long end);
void flush_cache_page(struct vm_area_struct *vma,
	unsigned long user_addr, unsigned long page);

/* Anonymous pages also need explicit congruency handling when aliasing */
#define ARCH_HAS_FLUSH_ANON_PAGE
void flush_anon_page(struct vm_area_struct *vma,
	struct page *page, unsigned long u_vaddr);

#endif	/* CONFIG_ARC_CACHE_VIPT_ALIASING */
80
81
82
83
84
85
/* Per-page flag: D$ is clean w.r.t. this page (aliases arch-private PG_arch_1) */
#define PG_dc_clean PG_arch_1
87
88
89
90
91
92static inline int cache_is_vipt_aliasing(void)
93{
94 return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
95}
96
/* Cache "color" of an address: the bit just above PAGE_SHIFT (2 colors) */
#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & 1)
98
99
100
101
/*
 * True if two virtual addresses of the same physical page would land in
 * different cache colors; always false on a non-aliasing cache.
 * (GCC statement expression so it can be used as a value.)
 */
#define addr_not_cache_congruent(addr1, addr2) \
({ \
	cache_is_vipt_aliasing() ? \
		(CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0; \
})
107
/*
 * Copy kernel data into a user-mapped page (e.g. ptrace breakpoint insert).
 * If the VMA is executable, the I$ must be brought in sync with the D$ for
 * the bytes just written, since the I$ does not snoop D$ writes.
 * NOTE(review): dst (a kernel vaddr) is passed as the paddr argument of
 * __sync_icache_dcache() — presumably valid for this arch's linear map;
 * confirm against the cache-flush implementation.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	memcpy(dst, src, len); \
	if (vma->vm_flags & VM_EXEC) \
		__sync_icache_dcache((unsigned long)(dst), vaddr, len); \
} while (0)
114
/*
 * Copy data out of a user-mapped page into a kernel buffer.
 * Reads need no cache maintenance, so this is just a memcpy.
 *
 * Wrapped in do { } while (0) so the macro is a single statement and is
 * safe inside an unbraced if/else; the previous form expanded to a bare
 * "memcpy(...);" and also carried a stray trailing '\' continuation that
 * spliced the following source line into the definition.
 */
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	memcpy(dst, src, len); \
} while (0)
117
118#endif
119