1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/kernel.h>
25#include <linux/mm.h>
26#include <linux/init.h>
27#include <linux/percpu.h>
28#include <linux/hardirq.h>
29#include <asm/pgalloc.h>
30#include <asm/tlbflush.h>
31#include <asm/tlb.h>
32
33#include "mmu_decl.h"
34
/* Per-cpu mmu_gather used by the generic TLB shootdown code. */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
36
37#ifdef CONFIG_SMP
38
39
40
41
42
43
44
/*
 * Per-cpu batch of page tables waiting to be freed after an RCU grace
 * period, plus a count of how often we had to fall back to freeing
 * synchronously because a batch page could not be allocated
 * (incremented in pgtable_free_now()).
 */
static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
static unsigned long pte_freelist_forced_free;
47
48struct pte_freelist_batch
49{
50 struct rcu_head rcu;
51 unsigned int index;
52 pgtable_free_t tables[0];
53};
54
/* How many pgtable_free_t entries fit in one page after the batch header. */
#define PTE_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
	  / sizeof(pgtable_free_t))
58
/*
 * IPI handler used purely for synchronization: the body is
 * intentionally empty.  pgtable_free_now() calls this on all CPUs with
 * wait=1, so returning from smp_call_function() guarantees every CPU
 * has taken an interrupt since the call was made.
 */
static void pte_free_smp_sync(void *arg)
{
	/* Do nothing, just ensure we sync with all CPUs */
}
63
64
65
66
/*
 * Free a page table immediately, without RCU deferral.  Used when a
 * batch page cannot be allocated (see pgtable_free_tlb()).  We first
 * force every other CPU through an interrupt and wait for completion;
 * NOTE(review): presumably this guarantees no CPU is still mid-walk of
 * the table being freed — confirm against the lockless walk rules.
 */
static void pgtable_free_now(pgtable_free_t pgf)
{
	pte_freelist_forced_free++;

	/* wait=1: block until all CPUs have run the (empty) sync handler */
	smp_call_function(pte_free_smp_sync, NULL, 1);

	pgtable_free(pgf);
}
75
76static void pte_free_rcu_callback(struct rcu_head *head)
77{
78 struct pte_freelist_batch *batch =
79 container_of(head, struct pte_freelist_batch, rcu);
80 unsigned int i;
81
82 for (i = 0; i < batch->index; i++)
83 pgtable_free(batch->tables[i]);
84
85 free_page((unsigned long)batch);
86}
87
/* Hand a batch to RCU; it is freed in pte_free_rcu_callback(). */
static void pte_free_submit(struct pte_freelist_batch *batch)
{
	INIT_RCU_HEAD(&batch->rcu);
	call_rcu(&batch->rcu, pte_free_rcu_callback);
}
93
94void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
95{
96
97 struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
98
99 if (atomic_read(&tlb->mm->mm_users) < 2 ||
100 cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))){
101 pgtable_free(pgf);
102 return;
103 }
104
105 if (*batchp == NULL) {
106 *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
107 if (*batchp == NULL) {
108 pgtable_free_now(pgf);
109 return;
110 }
111 (*batchp)->index = 0;
112 }
113 (*batchp)->tables[(*batchp)->index++] = pgf;
114 if ((*batchp)->index == PTE_FREELIST_SIZE) {
115 pte_free_submit(*batchp);
116 *batchp = NULL;
117 }
118}
119
120void pte_free_finish(void)
121{
122
123 struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
124
125 if (*batchp == NULL)
126 return;
127 pte_free_submit(*batchp);
128 *batchp = NULL;
129}
130
131#endif
132
133static inline int is_exec_fault(void)
134{
135 return current->thread.regs && TRAP(current->thread.regs) == 0x400;
136}
137
138
139
140
141
142
/*
 * A pte "looks normal" when it is present and user-accessible, and is
 * neither special nor uncached — i.e. it maps ordinary cacheable user
 * memory.  Used by the filters below to decide whether icache
 * maintenance may be required.
 */
static inline int pte_looks_normal(pte_t pte)
{
	return (pte_val(pte) &
	    (_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE | _PAGE_USER)) ==
	    (_PAGE_PRESENT | _PAGE_USER);
}
149
150struct page * maybe_pte_to_page(pte_t pte)
151{
152 unsigned long pfn = pte_pfn(pte);
153 struct page *page;
154
155 if (unlikely(!pfn_valid(pfn)))
156 return NULL;
157 page = pfn_to_page(pfn);
158 if (PageReserved(page))
159 return NULL;
160 return page;
161}
162
163#if defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0
164
165
166
167
168
169
170
171static pte_t set_pte_filter(pte_t pte, unsigned long addr)
172{
173 pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
174 if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
175 cpu_has_feature(CPU_FTR_NOEXECUTE))) {
176 struct page *pg = maybe_pte_to_page(pte);
177 if (!pg)
178 return pte;
179 if (!test_bit(PG_arch_1, &pg->flags)) {
180#ifdef CONFIG_8xx
181
182
183
184
185
186
187
188
189 _tlbil_va(addr, 0, 0, 0);
190#endif
191 flush_dcache_icache_page(pg);
192 set_bit(PG_arch_1, &pg->flags);
193 }
194 }
195 return pte;
196}
197
/*
 * Identity filter for this configuration (hash MMU, or no _PAGE_EXEC):
 * access-flag updates need no exec-bit fixups here.
 */
static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
				     int dirty)
{
	return pte;
}
203
204#else
205
206
207
208
209
210static pte_t set_pte_filter(pte_t pte, unsigned long addr)
211{
212 struct page *pg;
213
214
215 if (!(pte_val(pte) & _PAGE_EXEC) || !pte_looks_normal(pte))
216 return pte;
217
218
219 pg = maybe_pte_to_page(pte);
220 if (unlikely(!pg))
221 return pte;
222
223
224 if (test_bit(PG_arch_1, &pg->flags))
225 return pte;
226
227
228 if (is_exec_fault()) {
229 flush_dcache_icache_page(pg);
230 set_bit(PG_arch_1, &pg->flags);
231 return pte;
232 }
233
234
235 return __pte(pte_val(pte) & ~_PAGE_EXEC);
236}
237
238static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
239 int dirty)
240{
241 struct page *pg;
242
243
244
245
246
247
248 if (dirty || (pte_val(pte) & _PAGE_EXEC) || !is_exec_fault())
249 return pte;
250
251#ifdef CONFIG_DEBUG_VM
252
253
254
255
256 if (WARN_ON(!(vma->vm_flags & VM_EXEC)))
257 return pte;
258#endif
259
260
261 pg = maybe_pte_to_page(pte);
262 if (unlikely(!pg))
263 goto bail;
264
265
266 if (test_bit(PG_arch_1, &pg->flags))
267 goto bail;
268
269
270 flush_dcache_icache_page(pg);
271 set_bit(PG_arch_1, &pg->flags);
272
273 bail:
274 return __pte(pte_val(pte) | _PAGE_EXEC);
275}
276
277#endif
278
279
280
281
282void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
283 pte_t pte)
284{
285#ifdef CONFIG_DEBUG_VM
286 WARN_ON(pte_present(*ptep));
287#endif
288
289
290
291
292 pte = set_pte_filter(pte, addr);
293
294
295 __set_pte_at(mm, addr, ptep, pte, 0);
296}
297
298
299
300
301
302
303
304
305int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
306 pte_t *ptep, pte_t entry, int dirty)
307{
308 int changed;
309 entry = set_access_flags_filter(entry, vma, dirty);
310 changed = !pte_same(*(ptep), entry);
311 if (changed) {
312 if (!(vma->vm_flags & VM_HUGETLB))
313 assert_pte_locked(vma->vm_mm, address);
314 __ptep_set_access_flags(ptep, entry);
315 flush_tlb_page_nohash(vma, address);
316 }
317 return changed;
318}
319
320#ifdef CONFIG_DEBUG_VM
/*
 * Debug helper: assert that the pte page covering 'addr' in 'mm' is
 * locked by the caller.  Walks pgd -> pud -> pmd, BUG()ing if any
 * level is missing, then checks the pte spinlock.  init_mm is exempt.
 */
void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (mm == &init_mm)
		return;
	pgd = mm->pgd + pgd_index(addr);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, addr);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, addr);
	BUG_ON(!pmd_present(*pmd));
	assert_spin_locked(pte_lockptr(mm, pmd));
}
337#endif
338
339