1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/kernel.h>
25#include <linux/gfp.h>
26#include <linux/mm.h>
27#include <linux/percpu.h>
28#include <linux/hardirq.h>
29#include <linux/hugetlb.h>
30#include <asm/pgalloc.h>
31#include <asm/tlbflush.h>
32#include <asm/tlb.h>
33
34static inline int is_exec_fault(void)
35{
36 return current->thread.regs && TRAP(current->thread.regs) == 0x400;
37}
38
39
40
41
42
43
/*
 * Decide whether a PTE maps "normal" memory for the purposes of the
 * lazy icache-flush tracking below: present, not special, cacheable,
 * user-accessible.
 */
static inline int pte_looks_normal(pte_t pte)
{

#if defined(CONFIG_PPC_BOOK3S_64)
	/* Present and not a special (no struct page) mapping? */
	if ((pte_val(pte) & (_PAGE_PRESENT | _PAGE_SPECIAL)) == _PAGE_PRESENT) {
		/* Cache-inhibited mappings are never "normal". */
		if (pte_ci(pte))
			return 0;
		if (pte_user(pte))
			return 1;
	}
	return 0;
#else
	/*
	 * Other platforms: one mask test — must be present and user,
	 * and must NOT be special, cache-inhibited or privileged.
	 */
	return (pte_val(pte) &
		(_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE | _PAGE_USER |
		 _PAGE_PRIVILEGED)) ==
		(_PAGE_PRESENT | _PAGE_USER);
#endif
}
62
63static struct page *maybe_pte_to_page(pte_t pte)
64{
65 unsigned long pfn = pte_pfn(pte);
66 struct page *page;
67
68 if (unlikely(!pfn_valid(pfn)))
69 return NULL;
70 page = pfn_to_page(pfn);
71 if (PageReserved(page))
72 return NULL;
73 return page;
74}
75
76#if defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0
77
78
79
80
81
82
83
84static pte_t set_pte_filter(pte_t pte)
85{
86 if (radix_enabled())
87 return pte;
88
89 pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
90 if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
91 cpu_has_feature(CPU_FTR_NOEXECUTE))) {
92 struct page *pg = maybe_pte_to_page(pte);
93 if (!pg)
94 return pte;
95 if (!test_bit(PG_arch_1, &pg->flags)) {
96 flush_dcache_icache_page(pg);
97 set_bit(PG_arch_1, &pg->flags);
98 }
99 }
100 return pte;
101}
102
/*
 * No filtering needed on this configuration (standard MMU or
 * _PAGE_EXEC == 0): pass the PTE through unchanged.
 */
static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
				     int dirty)
{
	return pte;
}
108
109#else
110
111
112
113
114
/*
 * Embedded variant: _PAGE_EXEC is a real HW bit and the icache is not
 * coherent.  Defer the icache flush: strip exec from the PTE unless we
 * are here because of an actual exec fault, in which case flush once
 * and mark the page clean via PG_arch_1.
 */
static pte_t set_pte_filter(pte_t pte)
{
	struct page *pg;

	/* Nothing to do if exec isn't requested or page isn't "normal". */
	if (!(pte_val(pte) & _PAGE_EXEC) || !pte_looks_normal(pte))
		return pte;

	/* No struct page (invalid pfn / reserved): leave the PTE alone. */
	pg = maybe_pte_to_page(pte);
	if (unlikely(!pg))
		return pte;

	/* Page already icache-clean: exec bit can stay. */
	if (test_bit(PG_arch_1, &pg->flags))
		return pte;

	/* Exec fault right now: flush the icache and record it clean. */
	if (is_exec_fault()) {
		flush_dcache_icache_page(pg);
		set_bit(PG_arch_1, &pg->flags);
		return pte;
	}

	/* Not an exec fault: drop exec; a later exec fault restores it. */
	return __pte(pte_val(pte) & ~_PAGE_EXEC);
}
142
/*
 * Counterpart of set_pte_filter() for access-flag updates: on an exec
 * fault against a PTE whose exec bit was previously stripped, flush the
 * icache (once per page) and restore _PAGE_EXEC.
 */
static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
				     int dirty)
{
	struct page *pg;

	/*
	 * Only act when this is an exec fault and the PTE does not
	 * already carry exec.  Dirty updates are left untouched.
	 */
	if (dirty || (pte_val(pte) & _PAGE_EXEC) || !is_exec_fault())
		return pte;

#ifdef CONFIG_DEBUG_VM
	/*
	 * An exec fault on a VMA without VM_EXEC would be a bug in the
	 * fault path; warn and pass the PTE through unchanged.
	 */
	if (WARN_ON(!(vma->vm_flags & VM_EXEC)))
		return pte;
#endif

	/* No struct page: just grant exec without any flushing. */
	pg = maybe_pte_to_page(pte);
	if (unlikely(!pg))
		goto bail;

	/* Already icache-clean: skip the flush. */
	if (test_bit(PG_arch_1, &pg->flags))
		goto bail;

	/* Sync the icache and mark the page clean. */
	flush_dcache_icache_page(pg);
	set_bit(PG_arch_1, &pg->flags);

 bail:
	return __pte(pte_val(pte) | _PAGE_EXEC);
}
181
182#endif
183
184
185
186
/*
 * Install a PTE: sanity-check the slot, mark the entry as a PTE, run
 * it through the exec/icache filter, then write it to the page table.
 */
void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		pte_t pte)
{
	/*
	 * A present entry may only be overwritten here if it is a
	 * PROT_NONE one; anything else should go through the
	 * clear/flush/set sequence instead.
	 */
	VM_WARN_ON(pte_present(*ptep) && !pte_protnone(*ptep));

	/* Tag the entry as a leaf PTE (vs. a pointer to a lower level). */
	pte = __pte(pte_val(pte) | _PAGE_PTE);

	/*
	 * Apply the icache-coherency filter BEFORE the entry becomes
	 * visible, so exec is only granted on icache-clean pages.
	 */
	pte = set_pte_filter(pte);

	/* Low-level store of the entry. */
	__set_pte_at(mm, addr, ptep, pte, 0);
}
209
210
211
212
213
214
215
216
217int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
218 pte_t *ptep, pte_t entry, int dirty)
219{
220 int changed;
221 entry = set_access_flags_filter(entry, vma, dirty);
222 changed = !pte_same(*(ptep), entry);
223 if (changed) {
224 assert_pte_locked(vma->vm_mm, address);
225 __ptep_set_access_flags(vma, ptep, entry,
226 address, mmu_virtual_psize);
227 }
228 return changed;
229}
230
231#ifdef CONFIG_HUGETLB_PAGE
232extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
233 unsigned long addr, pte_t *ptep,
234 pte_t pte, int dirty)
235{
236#ifdef HUGETLB_NEED_PRELOAD
237
238
239
240
241
242 ptep_set_access_flags(vma, addr, ptep, pte, dirty);
243 return 1;
244#else
245 int changed, psize;
246
247 pte = set_access_flags_filter(pte, vma, dirty);
248 changed = !pte_same(*(ptep), pte);
249 if (changed) {
250
251#ifdef CONFIG_PPC_BOOK3S_64
252 struct hstate *h = hstate_vma(vma);
253
254 psize = hstate_get_psize(h);
255#ifdef CONFIG_DEBUG_VM
256 assert_spin_locked(huge_pte_lockptr(h, vma->vm_mm, ptep));
257#endif
258
259#else
260
261
262
263
264 psize = 0;
265#endif
266 __ptep_set_access_flags(vma, ptep, pte, addr, psize);
267 }
268 return changed;
269#endif
270}
271#endif
272
273#ifdef CONFIG_DEBUG_VM
/*
 * Debug helper: walk the page tables for @addr and assert that the PTE
 * page's spinlock is held.  Kernel (init_mm) mappings are exempt.
 */
void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (mm == &init_mm)
		return;
	pgd = mm->pgd + pgd_index(addr);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, addr);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, addr);

	/*
	 * A none pmd is tolerated here (no BUG): presumably the entry
	 * can be cleared concurrently, e.g. by THP collapse/zap, while
	 * the caller still legitimately holds its reference —
	 * NOTE(review): confirm against callers.
	 */
	if (pmd_none(*pmd))
		return;
	BUG_ON(!pmd_present(*pmd));
	assert_spin_locked(pte_lockptr(mm, pmd));
}
298#endif
299
/*
 * Resolve a vmalloc address to its physical address.  BUGs on an
 * unmapped address (vmalloc_to_pfn() returning 0).
 */
unsigned long vmalloc_to_phys(void *va)
{
	unsigned long pfn;
	unsigned long pa;

	pfn = vmalloc_to_pfn(va);
	BUG_ON(!pfn);

	/* Physical base of the page, plus the offset within it. */
	pa = __pa(pfn_to_kaddr(pfn));
	return pa + offset_in_page(va);
}
EXPORT_SYMBOL_GPL(vmalloc_to_phys);
308