/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "mm.h"

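/*
 * Memory type applied to PTEs of pages that have multiple shared,
 * writable mappings within the same mm.  Starts out as "bufferable"
 * (uncached, write buffer enabled) and is downgraded to fully uncached
 * by check_writebuffer_bugs() if the write buffer turns out not to be
 * coherent for such aliases.
 */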
static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE;

#if __LINUX_ARM_ARCH__ < 6
/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn, pte_t *ptep)
{
	pte_t entry = *ptep;
	int ret;

	/*
	 * If this page is present, it's actually being shared.
	 */
	ret = pte_present(entry);

	/*
	 * If this page isn't present, or is already setup to
	 * fault (ie, is old), we can safely ignore any issues.
	 */
	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
		flush_cache_page(vma, address, pfn);
		outer_flush_range((pfn << PAGE_SHIFT),
				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
		pte_val(entry) &= ~L_PTE_MT_MASK;
		pte_val(entry) |= shared_pte_mask;
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_page(vma, address);
	}

	return ret;
}

#if USE_SPLIT_PTE_PTLOCKS
/*
 * If we are using split PTE locks, then we need to take the PTE page
 * lock here.  Otherwise we are using the shared mm->page_table_lock,
 * which is already held, so we must not take it again.
 */
static inline void do_pte_lock(spinlock_t *ptl)
{
	/*
	 * Use the nested version here to indicate that we are already
	 * holding one similar spinlock.
	 */
	spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
}

static inline void do_pte_unlock(spinlock_t *ptl)
{
	spin_unlock(ptl);
}
#else
static inline void do_pte_lock(spinlock_t *ptl) {}
static inline void do_pte_unlock(spinlock_t *ptl) {}
#endif

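/*
 * Walk the page tables of @vma's mm and, if a PTE is present for
 * @address, switch it to the shared (uncached/bufferable) memory type.
 * Returns non-zero if a present PTE was found, i.e. the page really is
 * mapped here and the caller must fix up its own PTE as well.
 */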
static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

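	/*
	 * Walk down to the PTE level; if any intermediate level is
	 * missing or bad, there is no PTE to adjust.
	 */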
	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none_or_clear_bad(pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	/*
	 * This is called while another page table is mapped, so we
	 * must use the nested version.  This also means we need to
	 * open-code the spin-locking.
	 */
	ptl = pte_lockptr(vma->vm_mm, pmd);
	pte = pte_offset_map(pmd, address);
	do_pte_lock(ptl);

	ret = do_adjust_pte(vma, address, pfn, pte);

	do_pte_unlock(ptl);
	pte_unmap(pte);

	return ret;
}

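/*
 * A page is being mapped at @addr in @vma.  Hunt down every other
 * shared mapping of the same page within this mm (via the
 * address_space interval tree) and, if any of them are present, switch
 * them and our own PTE to the shared memory type so that the aliases
 * stay coherent.
 */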
static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
	unsigned long addr, pte_t *ptep, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;

	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally mask out the VMA
		 * that we are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
	}
	flush_dcache_mmap_unlock(mapping);
	if (aliases)
		do_adjust_pte(vma, addr, pfn, ptep);
}

/*
 * Take care of architecture specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_clean is not set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory range are
 *     written back to the page.
 *  2. If the page has multiple shared mappings, we need to keep the
 *     aliases coherent: on VIVT caches this means marking all of the
 *     mappings uncacheable/bufferable via make_coherent().
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
	pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct address_space *mapping;
	struct page *page;

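	/* We only need to worry about PTEs that map real RAM. */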
	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

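	/*
	 * If the kernel mapping of this page may still hold dirty cache
	 * lines (PG_dcache_clean not yet set), write them back first.
	 * Then handle user space aliases: on VIVT caches fix up every
	 * other mapping of the page, otherwise just make sure the
	 * I-cache sees new code in executable mappings.
	 */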
	mapping = page_mapping(page);
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);
	if (mapping) {
		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, ptep, pfn);
		else if (vma->vm_flags & VM_EXEC)
			__flush_icache_all();
	}
}
#endif	/* __LINUX_ARM_ARCH__ < 6 */

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have multiple shared mappings of the same space in
 * different physical pages.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;

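	/*
	 * p1 and p2 are bufferable aliases of the same physical word.
	 * Write 1 through one alias and 0 through the other, then read
	 * back through the first: if the write buffer fails to resolve
	 * the alias, the read returns the stale 1 and we report failure.
	 */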
	local_irq_disable();
	mb();
	*p1 = one;
	mb();
	*p2 = zero;
	mb();
	val = *p1;
	mb();
	local_irq_enable();
	return val != zero;
}

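/*
 * Probe for write buffer aliasing problems at boot: map one page at two
 * different bufferable virtual addresses and run the coherency test.
 * If it fails, downgrade shared_pte_mask so that shared mappings are
 * made fully uncached instead.
 */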
void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;

	printk(KERN_INFO "CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
					L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);

		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory\n";
		}

		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
		reason = "unable to grab page\n";
	}

	if (v) {
		printk("failed, %s\n", reason);
		shared_pte_mask = L_PTE_MT_UNCACHED;
	} else {
		printk("ok\n");
	}
}