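/*
 * Cache management for the S+core architecture: flushing of the
 * instruction and data caches through the S+core "cache" instruction,
 * and setup of the protection map used by the generic mm code.
 */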
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>

#include <asm/mmu_context.h>
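
/*
 * Write back and invalidate the D-cache lines covering one page.
 * The caller must pass a cache-line-aligned kernel virtual address;
 * the I-cache is deliberately left untouched.
 */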
static void flush_data_cache_page(unsigned long addr)
{
	unsigned int i;

	/*
	 * Step through the whole page one cache line at a time.  The
	 * original loop bound of PAGE_SIZE / L1_CACHE_BYTES combined
	 * with an L1_CACHE_BYTES step only covered a fraction of the
	 * page; iterate over the full PAGE_SIZE instead.
	 */
	for (i = 0; i < PAGE_SIZE; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
			"cache 0x0e, [%0, 0]\n"	/* write back & invalidate D-cache line */
			"cache 0x1a, [%0, 0]\n"	/* sync */
			"nop\n"
			: : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}
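
/*
 * Called by the mm core after a PTE update.  If the kernel has
 * written to the page (PG_arch_1 set) and the mapping is executable,
 * push the D-cache lines out so instruction fetch sees the new
 * contents, then clear the dirty marker.
 */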
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC);

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page) && test_bit(PG_arch_1, &page->flags)) {
		addr = (unsigned long) page_address(page);
		/* Only executable mappings need the D-cache flushed. */
		if (exec)
			flush_data_cache_page(addr);
		clear_bit(PG_arch_1, &page->flags);
	}
}
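
/*
 * protection_map[] translates the vm_flags permission bits
 * (READ/WRITE/EXEC, plus SHARED selecting the upper half) into page
 * protections: entries 0-7 are the private (copy-on-write) half,
 * entries 8-15 the shared half.
 */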
static inline void setup_protection_map(void)
{
	protection_map[0] = PAGE_NONE;
	protection_map[1] = PAGE_READONLY;
	protection_map[2] = PAGE_COPY;
	protection_map[3] = PAGE_COPY;
	protection_map[4] = PAGE_READONLY;
	protection_map[5] = PAGE_READONLY;
	protection_map[6] = PAGE_COPY;
	protection_map[7] = PAGE_COPY;
	protection_map[8] = PAGE_NONE;
	protection_map[9] = PAGE_READONLY;
	protection_map[10] = PAGE_SHARED;
	protection_map[11] = PAGE_SHARED;
	protection_map[12] = PAGE_READONLY;
	protection_map[13] = PAGE_READONLY;
	protection_map[14] = PAGE_SHARED;
	protection_map[15] = PAGE_SHARED;
}
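
/*
 * Per-CPU cache initialisation entry point; on this core only the
 * protection map needs to be set up.
 */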
void __devinit cpu_cache_init(void)
{
	setup_protection_map();
}
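
/*
 * Invalidate the entire I-cache.  Op 0x10 appears to operate on the
 * whole cache; r8 merely supplies a valid address operand for the
 * "cache" instruction.
 */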
void flush_icache_all(void)
{
	__asm__ __volatile__(
		"la r8, flush_icache_all\n"
		"cache 0x10, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		: : : "r8");
}
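
/*
 * Write back and invalidate the entire D-cache (op 0x1f), then issue
 * op 0x1a, which from its use throughout this file appears to act as
 * a sync/write-buffer drain.
 */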
void flush_dcache_all(void)
{
	__asm__ __volatile__(
		"la r8, flush_dcache_all\n"
		"cache 0x1f, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		"cache 0x1a, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		: : : "r8");
}
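
/*
 * Invalidate the whole I-cache, write back and invalidate the whole
 * D-cache, then sync.
 */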
void flush_cache_all(void)
{
	__asm__ __volatile__(
		"la r8, flush_cache_all\n"
		"cache 0x10, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		"cache 0x1f, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		"cache 0x1a, [r8, 0]\n"
		"nop\nnop\nnop\nnop\nnop\nnop\n"
		: : : "r8");
}
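
/*
 * Flush everything for an address-space teardown.  A zero context
 * means this mm has never been assigned an ASID, so nothing of it
 * can be in the caches.
 */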
void flush_cache_mm(struct mm_struct *mm)
{
	if (!(mm->context))
		return;
	flush_cache_all();
}
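
/*
 * Flush a user virtual range page by page.  Each page is looked up
 * in the page tables first: pages that are not present are skipped,
 * and a range crossing page boundaries is flushed in per-page chunks
 * (including the I-cache for executable mappings).
 */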
void flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int exec = vma->vm_flags & VM_EXEC;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (!(mm->context))
		return;

	while (start <= end) {
		unsigned long tmpend;

		/* Walk the page tables for the current page. */
		pgdp = pgd_offset(mm, start);
		pudp = pud_offset(pgdp, start);
		pmdp = pmd_offset(pudp, start);
		ptep = pte_offset(pmdp, start);

		/* Skip pages that are not present. */
		if (!(pte_val(*ptep) & _PAGE_PRESENT)) {
			start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
			continue;
		}
		/* Flush at most up to the end of the current page. */
		tmpend = (start | (PAGE_SIZE - 1)) > end ?
			end : (start | (PAGE_SIZE - 1));

		flush_dcache_range(start, tmpend);
		if (exec)
			flush_icache_range(start, tmpend);
		start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
	}
}
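
/*
 * Flush one user page through its kernel direct mapping: the pfn is
 * turned into a 0xa0000000-based kernel address, so no user TLB
 * entry is needed.
 */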
void flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	int exec = vma->vm_flags & VM_EXEC;
	unsigned long kaddr = 0xa0000000 | (pfn << PAGE_SHIFT);

	flush_dcache_range(kaddr, kaddr + PAGE_SIZE);

	if (exec)
		flush_icache_range(kaddr, kaddr + PAGE_SIZE);
}
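
/*
 * Make a signal trampoline written at @addr visible to instruction
 * fetch: operate on the trampoline's two words (offsets 0 and 4) in
 * the I-cache (op 0x02) and the D-cache (op 0x0d), then sync
 * (op 0x1a).
 */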
void flush_cache_sigtramp(unsigned long addr)
{
	__asm__ __volatile__(
		/* I-cache: the trampoline's two instruction words. */
		"cache 0x02, [%0, 0]\n"
		"nop\nnop\nnop\nnop\nnop\n"
		"cache 0x02, [%0, 0x4]\n"
		"nop\nnop\nnop\nnop\nnop\n"

		/* D-cache: push the same two words out to memory. */
		"cache 0x0d, [%0, 0]\n"
		"nop\nnop\nnop\nnop\nnop\n"
		"cache 0x0d, [%0, 0x4]\n"
		"nop\nnop\nnop\nnop\nnop\n"

		/* Sync before the trampoline is executed. */
		"cache 0x1a, [%0, 0]\n"
		"nop\nnop\nnop\nnop\nnop\n"
		: : "r" (addr));
}
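
/*
 * Write back and invalidate every D-cache line in [start, end),
 * following each line op with a sync (op 0x1a).
 */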
void flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long size, i;

	/*
	 * Align to cache lines.  Round end up rather than down so an
	 * unaligned end does not leave the final partial line unflushed;
	 * flushing a few extra bytes is harmless.
	 */
	start = start & ~(L1_CACHE_BYTES - 1);
	end = (end + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
	size = end - start;

	for (i = 0; i < size; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
			"cache 0x0e, [%0, 0]\n"
			"nop\nnop\nnop\nnop\nnop\n"
			"cache 0x1a, [%0, 0]\n"
			"nop\nnop\nnop\nnop\nnop\n"
			: : "r" (start));
		start += L1_CACHE_BYTES;
	}
}
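
/*
 * Invalidate every I-cache line in [start, end).
 */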
void flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long size, i;

	start = start & ~(L1_CACHE_BYTES - 1);
	/* Round end up so an unaligned end still covers its last line. */
	end = (end + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
	size = end - start;

	for (i = 0; i < size; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
			"cache 0x02, [%0, 0]\n"
			"nop\nnop\nnop\nnop\nnop\n"
			: : "r" (start));
		start += L1_CACHE_BYTES;
	}
}