1#ifndef _METAG_CACHEFLUSH_H
2#define _METAG_CACHEFLUSH_H
3
4#include <linux/mm.h>
5#include <linux/sched.h>
6#include <linux/io.h>
7
8#include <asm/l2cache.h>
9#include <asm/metag_isa.h>
10#include <asm/metag_mem.h>
11
void metag_cache_probe(void);

void metag_data_cache_flush_all(const void *start);
void metag_code_cache_flush_all(const void *start);

/*
 * Flush cache lines that may hold data or code for the linear address range
 * supplied.
 * NOTE(review): the meaning of @start for the *_flush_all() variants (callers
 * below pass PAGE_OFFSET) and the behaviour for large @bytes values are
 * defined by the out-of-line implementations -- confirm there before relying
 * on exact range semantics.
 */
void metag_data_cache_flush(const void *start, int bytes);
void metag_code_cache_flush(const void *start, int bytes);
26
27#ifdef CONFIG_METAG_META12
28
29
30
/*
 * Flush the instruction cache and then the data cache in their entirety.
 * NOTE(review): PAGE_OFFSET is forwarded to the flush-all primitives declared
 * above; presumably it selects the address space operated on -- confirm
 * against the implementations.
 */
static inline void __flush_cache_all(void)
{
	metag_code_cache_flush_all((void *) PAGE_OFFSET);
	metag_data_cache_flush_all((void *) PAGE_OFFSET);
}

#define flush_cache_all() __flush_cache_all()
38
39
40static inline void flush_cache_mm(struct mm_struct *mm)
41{
42 if (mm == current->mm)
43 __flush_cache_all();
44}
45
46#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
47
48
/*
 * Flush a range of user addresses.  @start/@end are ignored: this simply
 * defers to flush_cache_mm(), which flushes everything when @vma belongs to
 * the current mm.
 */
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
{
	flush_cache_mm(vma->vm_mm);
}
54
/*
 * Flush a single user page.  @vmaddr and @pfn are ignored; like
 * flush_cache_range() this falls back to flushing the whole mm.
 */
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr, unsigned long pfn)
{
	flush_cache_mm(vma->vm_mm);
}
60
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
/*
 * Make the data cache coherent for a page cache page.  The specific @page is
 * ignored: the entire data cache is flushed via the PAGE_OFFSET mapping.
 */
static inline void flush_dcache_page(struct page *page)
{
	metag_data_cache_flush_all((void *) PAGE_OFFSET);
}

/* No per-mapping flush state is kept, so the mmap lock hooks are no-ops. */
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
69
/*
 * Flush the instruction cache for one page that may contain executable code,
 * addressed through the page's kernel virtual mapping.
 */
static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page)
{
	metag_code_cache_flush(page_to_virt(page), PAGE_SIZE);
}
75
/*
 * Called before new mappings are established in the vmalloc area.  The
 * @start/@end range is ignored; the whole data cache is flushed.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	metag_data_cache_flush_all((void *) PAGE_OFFSET);
}
80
/*
 * Called before mappings in the vmalloc area are torn down.  As with
 * flush_cache_vmap(), the range is ignored and everything is flushed.
 */
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	metag_data_cache_flush_all((void *) PAGE_OFFSET);
}
85
86#else
87
88
89
/*
 * In this configuration the per-mm / per-range cache maintenance hooks are
 * all no-ops; only flush_dcache_page() and the icache routines below do any
 * real work.
 */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_page(vma, pg) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
100
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
{
	/*
	 * Make the instruction cache coherent with data just written to the
	 * page.  On SMP this is requested by writing the SYSC_ICACHE_FLUSH
	 * register; otherwise the local code cache is flushed in its
	 * entirety.  @page itself is not used.
	 * NOTE(review): flushing everything per page is heavy-handed; a
	 * deferred per-page scheme (e.g. using PG_arch_1) may be what was
	 * intended -- worth revisiting.
	 */
#ifdef CONFIG_SMP
	metag_out32(1, SYSC_ICACHE_FLUSH);
#else
	metag_code_cache_flush_all((void *) PAGE_OFFSET);
#endif
}
114
115#endif
116
117
/*
 * Make the instruction cache coherent over [address, endaddr), e.g. after
 * writing code into that range.  On SMP the flush is requested through the
 * SYSC_ICACHE_FLUSH register; otherwise only the given range of the local
 * code cache is flushed.
 */
static inline void flush_icache_range(unsigned long address,
				      unsigned long endaddr)
{
#ifdef CONFIG_SMP
	metag_out32(1, SYSC_ICACHE_FLUSH);
#else
	metag_code_cache_flush((void *) address, endaddr - address);
#endif
}
127
/*
 * Flush the instruction cache for a signal trampoline just written at @addr,
 * in case stale code from a previous trampoline at the same address is still
 * cached.
 * NOTE(review): no data cache flush is performed here -- presumably the
 * trampoline write reaches memory without one (write-through dcache);
 * confirm for all supported configurations.
 */
static inline void flush_cache_sigtramp(unsigned long addr, int size)
{
#ifdef CONFIG_SMP
	metag_out32(1, SYSC_ICACHE_FLUSH);
#else
	metag_code_cache_flush((void *) addr, size);
#endif
}
143
144#ifdef CONFIG_METAG_L2C
145
146
147
148
149
/*
 * Issue a single CACHEWD operation @data for the cache line containing
 * @addr.  The low six bits are masked off first so the operation targets the
 * 64-byte line boundary.
 */
static inline void cachewd_line(void *addr, unsigned int data)
{
	void *line = (void *)((unsigned long)addr & ~0x3fUL);

	__builtin_meta2_cachewd(line, data);
}
155
156
/*
 * Apply CACHEW operation @op to every 64-byte cache line touched by
 * [start, start + size).
 */
static inline void cachew_region_op(void *start, unsigned long size,
				    unsigned int op)
{
	unsigned long misalign = (unsigned long)start & 0x3f;
	int lines_left;

	/* Round start down to a line boundary, widening the size to match. */
	size += misalign;
	start -= misalign;

	lines_left = (size - 1) >> 6;
	do {
		__builtin_meta2_cachewd(start, op);
		start += 0x40;
	} while (lines_left--);
}
172
173
174static inline void l2c_fence_flush(void *addr)
175{
176
177
178
179
180
181 (void)(volatile u8 *)addr;
182 cachewd_line(addr, CACHEW_FLUSH_L1D_L2);
183}
184
185
/*
 * Fence for writebacks: ensure maintenance on the line containing @addr has
 * taken effect before continuing, by flushing the line and then performing
 * the read-back-and-reflush synchronisation in l2c_fence_flush().
 * NOTE(review): this issues CACHEW_FLUSH_L1D_L2 twice on the same line (once
 * here, once inside l2c_fence_flush()) -- confirm the double flush is
 * intended rather than a write fence.
 */
static inline void l2c_fence(void *addr)
{
	cachewd_line(addr, CACHEW_FLUSH_L1D_L2);
	l2c_fence_flush(addr);
}
196
197
198static inline void flush_dcache_region(void *start, unsigned long size)
199{
200
201 if (meta_l2c_is_enabled()) {
202 cachew_region_op(start, size, CACHEW_FLUSH_L1D_L2);
203 if (meta_l2c_is_writeback())
204 l2c_fence_flush(start + size - 1);
205 } else {
206 metag_data_cache_flush(start, size);
207 }
208}
209
210
211static inline void writeback_dcache_region(void *start, unsigned long size)
212{
213 if (meta_l2c_is_enabled() && meta_l2c_is_writeback()) {
214 cachew_region_op(start, size, CACHEW_WRITEBACK_L1D_L2);
215 l2c_fence(start + size - 1);
216 }
217}
218
219
220static inline void invalidate_dcache_region(void *start, unsigned long size)
221{
222 if (meta_l2c_is_enabled())
223 cachew_region_op(start, size, CACHEW_INVALIDATE_L1D_L2);
224 else
225 metag_data_cache_flush(start, size);
226}
#else
/*
 * No L2 cache: a region flush is just the core data cache flush, writeback
 * is a no-op, and invalidate is implemented as a full flush.
 */
#define flush_dcache_region(s, l) metag_data_cache_flush((s), (l))
#define writeback_dcache_region(s, l) do {} while (0)
#define invalidate_dcache_region(s, l) flush_dcache_region((s), (l))
#endif
232
/*
 * Copy @len bytes from @src into a user page through its kernel mapping
 * @dst, then flush the instruction cache over the written range in case the
 * page contains code.  @vma, @page and @vaddr are unused here.
 */
static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, const void *src,
				     unsigned long len)
{
	memcpy(dst, src, len);
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}
241
/*
 * Copy @len bytes out of a user page through its kernel mapping @src.
 * No cache maintenance is performed for reads.
 */
static inline void copy_from_user_page(struct vm_area_struct *vma,
				       struct page *page, unsigned long vaddr,
				       void *dst, const void *src,
				       unsigned long len)
{
	memcpy(dst, src, len);
}
249
250#endif
251