1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/kernel.h>
25#include <linux/gfp.h>
26#include <linux/mm.h>
27#include <linux/percpu.h>
28#include <linux/hardirq.h>
29#include <linux/hugetlb.h>
30#include <asm/pgalloc.h>
31#include <asm/tlbflush.h>
32#include <asm/tlb.h>
33
34static inline int is_exec_fault(void)
35{
36 return current->thread.regs && TRAP(current->thread.regs) == 0x400;
37}
38
39
40
41
42
43
/*
 * Test whether a PTE maps ordinary cacheable user memory — the only kind
 * of page for which the filters below do i-cache/d-cache maintenance.
 * Returns non-zero for a present, non-special, cacheable, user-accessible
 * page; 0 otherwise.
 */
static inline int pte_looks_normal(pte_t pte)
{

#if defined(CONFIG_PPC_BOOK3S_64)
	if ((pte_val(pte) & (_PAGE_PRESENT | _PAGE_SPECIAL)) == _PAGE_PRESENT) {
		/* Cache-inhibited mapping: not normal memory. */
		if (pte_ci(pte))
			return 0;
		if (pte_user(pte))
			return 1;
	}
	return 0;
#else
	/*
	 * Single mask compare: _PAGE_PRESENT and _PAGE_USER must be set,
	 * while _PAGE_SPECIAL, _PAGE_NO_CACHE and _PAGE_PRIVILEGED must
	 * all be clear.
	 */
	return (pte_val(pte) &
		(_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE | _PAGE_USER |
		 _PAGE_PRIVILEGED)) ==
		(_PAGE_PRESENT | _PAGE_USER);
#endif
}
62
63static struct page *maybe_pte_to_page(pte_t pte)
64{
65 unsigned long pfn = pte_pfn(pte);
66 struct page *page;
67
68 if (unlikely(!pfn_valid(pfn)))
69 return NULL;
70 page = pfn_to_page(pfn);
71 if (PageReserved(page))
72 return NULL;
73 return page;
74}
75
76#if defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0
77
78
79
80
81
82
83
/*
 * set_pte_filter() for hash-MMU / no-exec-bit configurations: keep the
 * i-cache coherent with the d-cache before a page becomes mapped.
 *
 * On CPUs that have neither a coherent i-cache nor a usable no-execute
 * bit, any normal page we map could end up being executed, so flush the
 * d-cache/i-cache pair once per page, using PG_arch_1 as an "i-cache is
 * clean" marker.  Also strips the hash-PTE status bits so stale HPTE
 * bookkeeping never leaks into a freshly installed PTE.
 * Returns the (possibly adjusted) PTE value to install.
 */
static pte_t set_pte_filter(pte_t pte)
{
	/* Radix MMU: caches are coherent in hardware, nothing to filter. */
	if (radix_enabled())
		return pte;

	/* Drop stale hash-PTE bookkeeping bits from the new entry. */
	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
	if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
				       cpu_has_feature(CPU_FTR_NOEXECUTE))) {
		struct page *pg = maybe_pte_to_page(pte);
		if (!pg)
			return pte;
		/* PG_arch_1 set means this page's i-cache is already clean. */
		if (!test_bit(PG_arch_1, &pg->flags)) {
			flush_dcache_icache_page(pg);
			set_bit(PG_arch_1, &pg->flags);
		}
	}
	return pte;
}
102
/*
 * On these configurations all the i-cache work is handled eagerly by
 * set_pte_filter() above (or there is no HW exec permission at all), so
 * access-flag updates need no filtering: pass @pte through unchanged.
 */
static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
				     int dirty)
{
	return pte;
}
108
109#else
110
111
112
113
114
/*
 * set_pte_filter() for CPUs with a real HW exec bit: implement lazy
 * i-cache flushing.  The first time an executable page is mapped we
 * either flush the i-cache now (if this mapping is being installed to
 * service an instruction fault) or strip _PAGE_EXEC so that a later exec
 * fault comes back through set_access_flags_filter() and flushes then.
 * PG_arch_1 records that the page's i-cache is already clean.
 */
static pte_t set_pte_filter(pte_t pte)
{
	struct page *pg;

	/* Non-executable or abnormal (uncached/special) pages: no flushing. */
	if (!(pte_val(pte) & _PAGE_EXEC) || !pte_looks_normal(pte))
		return pte;

	/* No usable struct page (invalid pfn or reserved): leave PTE alone. */
	pg = maybe_pte_to_page(pte);
	if (unlikely(!pg))
		return pte;

	/* Page already flushed since it was last modified: nothing to do. */
	if (test_bit(PG_arch_1, &pg->flags))
		return pte;

	/* Servicing an exec fault right now: flush and mark the page clean. */
	if (is_exec_fault()) {
		flush_dcache_icache_page(pg);
		set_bit(PG_arch_1, &pg->flags);
		return pte;
	}

	/* Not an exec fault: defer the flush by removing exec permission. */
	return __pte(pte_val(pte) & ~_PAGE_EXEC);
}
142
/*
 * Companion to the lazy-exec set_pte_filter() above, called when access
 * flags are updated on an existing PTE.  If we are servicing an exec
 * fault on an entry whose _PAGE_EXEC was stripped earlier, flush the
 * i-cache (unless PG_arch_1 says it is already clean) and restore exec
 * permission.  All other updates pass through unchanged.
 */
static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
				     int dirty)
{
	struct page *pg;

	/*
	 * Only act when this is a non-dirtying update, on a PTE that
	 * currently lacks exec permission, while servicing an exec fault.
	 */
	if (dirty || (pte_val(pte) & _PAGE_EXEC) || !is_exec_fault())
		return pte;

#ifdef CONFIG_DEBUG_VM
	/*
	 * An exec fault against a VMA without VM_EXEC should have been
	 * rejected before reaching here; warn and bail out if it wasn't.
	 */
	if (WARN_ON(!(vma->vm_flags & VM_EXEC)))
		return pte;
#endif

	/* No usable struct page: grant exec without cache maintenance. */
	pg = maybe_pte_to_page(pte);
	if (unlikely(!pg))
		goto bail;

	/* I-cache already clean for this page: just restore exec. */
	if (test_bit(PG_arch_1, &pg->flags))
		goto bail;

	/* Flush the caches and remember that this page is now clean. */
	flush_dcache_icache_page(pg);
	set_bit(PG_arch_1, &pg->flags);

 bail:
	return __pte(pte_val(pte) | _PAGE_EXEC);
}
181
182#endif
183
184
185
186
/*
 * set_pte_at() — install a new PTE value into the page table.
 * @mm:   address space the mapping belongs to
 * @addr: virtual address being mapped
 * @ptep: PTE slot to fill
 * @pte:  new PTE value
 *
 * Entry point used by generic MM code; runs the architecture's
 * cache-coherency filter before writing the entry.
 */
void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		pte_t pte)
{
	/*
	 * The slot is expected to be empty (or a protnone entry);
	 * overwriting a live present PTE here would bypass the flushes
	 * normally done when tearing a mapping down.
	 */
	VM_WARN_ON(pte_present(*ptep) && !pte_protnone(*ptep));

	/*
	 * Tag the entry with _PAGE_PTE.  NOTE(review): this marks the
	 * entry as a leaf PTE (book3s64 uses it to tell PTEs from other
	 * table entries) — confirm against the pgtable headers.
	 */
	pte = __pte(pte_val(pte) | _PAGE_PTE);

	/*
	 * Apply the exec/i-cache filtering appropriate for this MMU
	 * family (see the set_pte_filter() variants above).
	 */
	pte = set_pte_filter(pte);

	/* Perform the actual write of the PTE. */
	__set_pte_at(mm, addr, ptep, pte, 0);
}
209
210
211
212
213
214
215
216
217int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
218 pte_t *ptep, pte_t entry, int dirty)
219{
220 int changed;
221 entry = set_access_flags_filter(entry, vma, dirty);
222 changed = !pte_same(*(ptep), entry);
223 if (changed) {
224 if (!is_vm_hugetlb_page(vma))
225 assert_pte_locked(vma->vm_mm, address);
226 __ptep_set_access_flags(vma->vm_mm, ptep, entry, address);
227 flush_tlb_page(vma, address);
228 }
229 return changed;
230}
231
232#ifdef CONFIG_DEBUG_VM
/*
 * Debug helper: assert that the page-table lock covering the PTE for
 * @addr in @mm is held.  Walks pgd -> pud -> pmd by hand and BUG()s if
 * an upper level is absent or the spinlock is not held.  init_mm is
 * exempt (its mappings are not PTE-locked).
 */
void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (mm == &init_mm)
		return;
	pgd = mm->pgd + pgd_index(addr);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, addr);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, addr);
	/*
	 * A none pmd is tolerated here rather than BUG_ON'd: the caller
	 * may race with something clearing the pmd.  NOTE(review): this
	 * looks like the THP-collapse race handled upstream — confirm
	 * against the callers before tightening.
	 */
	if (pmd_none(*pmd))
		return;
	BUG_ON(!pmd_present(*pmd));
	assert_spin_locked(pte_lockptr(mm, pmd));
}
257#endif
258
/*
 * Translate a vmalloc'ed kernel virtual address to its physical address.
 * BUG()s if @va has no backing pfn.
 */
unsigned long vmalloc_to_phys(void *va)
{
	unsigned long pfn = vmalloc_to_pfn(va);
	unsigned long base;

	BUG_ON(pfn == 0);
	base = __pa(pfn_to_kaddr(pfn));
	return base + offset_in_page(va);
}
EXPORT_SYMBOL_GPL(vmalloc_to_phys);
267