/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Handling of virtual cache aliases on ARM: when the same page is
 *  mapped at several user addresses within one mm, the aliased
 *  mappings are downgraded to a shared (bufferable or uncached)
 *  memory type so that all views of the page stay coherent.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/*
 * Memory type used for aliased shared mappings.  We default to
 * "bufferable"; check_writebuffer_bugs() downgrades this to fully
 * uncached if the write buffer turns out not to be coherent.
 */
static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;

/*
 * Make the PTE for @address in @vma use the shared (uncacheable or
 * merely bufferable) memory type, flushing the caches for that page
 * first.  Returns non-zero if a present PTE was found, so the caller
 * knows an alias actually exists.
 *
 * The pte lock held when calling update_mmu_cache() must also guard
 * the PTE we modify here, which lives elsewhere in the same mm.
 */
static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte, entry;
	int ret;

	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none(*pgd))
		goto no_pgd;
	if (pgd_bad(*pgd))
		goto bad_pgd;

	pmd = pmd_offset(pgd, address);
	if (pmd_none(*pmd))
		goto no_pmd;
	if (pmd_bad(*pmd))
		goto bad_pmd;

	pte = pte_offset_map(pmd, address);
	entry = *pte;

	/*
	 * If this page is present, it's actually being shared.
	 */
	ret = pte_present(entry);

	/*
	 * If this page isn't present, or already uses the shared memory
	 * type, there is nothing to fix up.
	 */
	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
		unsigned long pfn = pte_pfn(entry);
		flush_cache_page(vma, address, pfn);
		outer_flush_range((pfn << PAGE_SHIFT),
				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
		pte_val(entry) &= ~L_PTE_MT_MASK;
		pte_val(entry) |= shared_pte_mask;
		set_pte_at(vma->vm_mm, address, pte, entry);
		flush_tlb_page(vma, address);
	}
	pte_unmap(pte);
	return ret;

bad_pgd:
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
no_pgd:
	return 0;

bad_pmd:
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
no_pmd:
	return 0;
}

static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;

	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally skip the VMA that we
		 * are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset);
	}
	flush_dcache_mmap_unlock(mapping);
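	/*
	 * If other mappings of this page were found in the same mm and
	 * retyped above, retype the faulting mapping as well; otherwise
	 * a cache flush for this page is sufficient.
	 */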
	if (aliases)
		adjust_pte(vma, addr);
	else
		flush_cache_page(vma, addr, pfn);
}

/*
 * Take care of architecture specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_dirty is set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     different address spaces, we need to ensure that the cache
 *     aliasing is handled correctly.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct address_space *mapping;
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	mapping = page_mapping(page);
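	/*
	 * On UP, D-cache flushing for page-cache pages is deferred until
	 * the page is actually mapped into user space; catch up here if
	 * the page was marked dirty via PG_dcache_dirty.
	 */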
#ifndef CONFIG_SMP
	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
		__flush_dcache_page(mapping, page);
#endif
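	/*
	 * For VIVT caches, fix up any other user mappings of the same
	 * page; on other cache types an executable mapping only needs
	 * the I-cache invalidated.
	 */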
	if (mapping) {
		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, pfn);
		else if (vma->vm_flags & VM_EXEC)
			__flush_icache_all();
	}
}

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;
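	/*
	 * p1 and p2 are two virtual aliases of the same word.  Write
	 * through one alias, then the other, and read the first back:
	 * with a coherent write buffer the final read returns zero.
	 */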
	local_irq_disable();
	mb();
	*p1 = one;
	mb();
	*p2 = zero;
	mb();
	val = *p1;
	mb();
	local_irq_enable();
	return val != zero;
}

/*
 * Map the same page at two different kernel virtual addresses and use
 * check_writebuffer() to see whether writes through one alias are
 * visible through the other.  If not (or if the test cannot be run),
 * fall back to fully uncached shared mappings.
 */
void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;

	printk(KERN_INFO "CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		pgprot_t prot = __pgprot(L_PTE_PRESENT|L_PTE_YOUNG|
					 L_PTE_DIRTY|L_PTE_WRITE|
					 L_PTE_MT_BUFFERABLE);

		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory\n";
		}

		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
		reason = "unable to grab page\n";
	}

	/*
	 * v is non-zero if the coherency test failed or could not be
	 * run; in either case play it safe and use uncached mappings
	 * for shared pages.
	 */
	if (v) {
		printk("failed, %s\n", reason);
		shared_pte_mask = L_PTE_MT_UNCACHED;
	} else {
		printk("ok\n");
	}
}