/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "mm.h"

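/*
 * Memory type used for shared, aliased user mappings: uncacheable but
 * bufferable by default.  check_writebuffer_bugs() downgrades this to
 * fully uncached if the write buffer proves incoherent for aliases.
 */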
static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE;

#if __LINUX_ARM_ARCH__ < 6
/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn, pte_t *ptep)
{
	pte_t entry = *ptep;
	int ret;

	/*
	 * If this page is present, it's actually being shared.
	 */
	ret = pte_present(entry);

	/*
	 * If this page isn't present, or is already set up to
	 * fault (i.e. is old), we can safely ignore any issues.
	 */
	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
		flush_cache_page(vma, address, pfn);
		outer_flush_range((pfn << PAGE_SHIFT),
				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
		pte_val(entry) &= ~L_PTE_MT_MASK;
		pte_val(entry) |= shared_pte_mask;
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_page(vma, address);
	}

	return ret;
}

#if USE_SPLIT_PTLOCKS
/*
 * If we are using split PTE locks, then we need to take the page
 * table lock here.  Otherwise we are using the shared
 * mm->page_table_lock, which is already locked, so we cannot take it.
 */
static inline void do_pte_lock(spinlock_t *ptl)
{
	/*
	 * Use the nested version here to indicate that we are already
	 * holding one similar spinlock.
	 */
	spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
}

static inline void do_pte_unlock(spinlock_t *ptl)
{
	spin_unlock(ptl);
}
#else /* !USE_SPLIT_PTLOCKS */
static inline void do_pte_lock(spinlock_t *ptl) {}
static inline void do_pte_unlock(spinlock_t *ptl) {}
#endif /* USE_SPLIT_PTLOCKS */

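/*
 * Walk the page tables of @vma to find the PTE mapping @address and,
 * with the appropriate PTE lock held, pass it to do_adjust_pte().
 * Returns non-zero if a present PTE was found (and possibly adjusted).
 */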
static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none_or_clear_bad(pgd))
		return 0;

	pmd = pmd_offset(pgd, address);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	/*
	 * This is called while another page table is mapped, so we
	 * must use the nested version.  This also means we need to
	 * open-code the spin-locking.
	 */
	ptl = pte_lockptr(vma->vm_mm, pmd);
	pte = pte_offset_map(pmd, address);
	do_pte_lock(ptl);

	ret = do_adjust_pte(vma, address, pfn, pte);

	do_pte_unlock(ptl);
	pte_unmap(pte);

	return ret;
}

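/*
 * Find all other shared mappings of this page within the current mm
 * and downgrade their PTEs to the shared (uncacheable) memory type.
 * If any aliases were adjusted, downgrade the faulting PTE as well so
 * that every view of the page stays coherent.
 */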
static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
	unsigned long addr, pte_t *ptep, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;

	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to check if this page is aliased by
	 * multiple mappings.  We avoid taking locks here.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally mask out the VMA
		 * that we are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
	}
	flush_dcache_mmap_unlock(mapping);
	if (aliases)
		do_adjust_pte(vma, addr, pfn, ptep);
}

/*
 * Take care of architecture-specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_clean is not set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     different processes, we may need to fix up some of the cache
 *     lines to avoid aliasing between those mappings.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
	pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct address_space *mapping;
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);
	if (mapping) {
		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, ptep, pfn);
		else if (vma->vm_flags & VM_EXEC)
			__flush_icache_all();
	}
}
#endif	/* __LINUX_ARM_ARCH__ < 6 */

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;

	local_irq_disable();
	mb();
	*p1 = one;		/* write 1 through the first alias... */
	mb();
	*p2 = zero;		/* ...then 0 through the second... */
	mb();
	val = *p1;		/* ...and read back through the first */
	mb();
	local_irq_enable();
	return val != zero;	/* non-zero: stale data was observed */
}

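/*
 * Map a single physical page at two different virtual addresses using
 * the "bufferable" memory type and let check_writebuffer() probe the
 * pair.  If the CPU's write buffer cannot keep such aliases coherent,
 * shared mappings are downgraded to fully uncached.
 */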
void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;

	printk(KERN_INFO "CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
					L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);

		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory";
		}

		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
		reason = "unable to grab page";
	}

	if (v) {
		printk("failed, %s\n", reason);
		shared_pte_mask = L_PTE_MT_UNCACHED;
	} else {
		printk("ok\n");
	}
}