// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in linux/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mm_inline.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *p4d)
{
	p4d_ERROR(*p4d);
	p4d_clear(p4d);
}
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}
#endif

/*
 * Note that the pmd variant below cannot be stubbed out just as for p4d/pud
 * above: pmd folding is implemented via "strong" function stubs, so a real
 * function body is needed here even on configurations which fold the pmd
 * level.
 */
void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache(). This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(ptep_get(ptep), entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address, ptep);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif
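
/*
 * Example (hypothetical sketch, not part of the generic API): a typical use
 * of ptep_clear_flush() when an existing mapping must be atomically replaced
 * by a more restrictive one, as in COW-style write protection.  The helper
 * name and the caller's context are illustrative only; the caller is assumed
 * to hold the pte lock for @ptep.
 */
static inline pte_t example_wrprotect_one(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep)
{
	/* clear the pte and shoot down any stale TLB entry */
	pte_t old = ptep_clear_flush(vma, addr, ptep);

	/* reinstall a write-protected copy of the old mapping */
	set_pte_at(vma->vm_mm, addr, ptep, pte_wrprotect(old));
	return old;
}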

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pud_t *pudp)
{
	pud_t pud;

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
	VM_BUG_ON(!pud_trans_huge(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return pud;
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#endif /* __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH */

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument so destroys page coloring of some arch */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif
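
/*
 * Example (hypothetical sketch, not part of this file): how the deposit and
 * withdraw helpers pair up in THP code.  A page table page preallocated by
 * the caller is deposited when a huge pmd is installed, and withdrawn again
 * when that huge pmd is later split or zapped.  Everything here except the
 * deposit/withdraw calls themselves is illustrative.
 */
static inline void example_deposit_then_withdraw(struct mm_struct *mm,
						 pmd_t *pmdp, pgtable_t pgtable)
{
	spinlock_t *ptl = pmd_lock(mm, pmdp);

	/* when installing the huge pmd: stash the preallocated page table */
	pgtable_trans_huge_deposit(mm, pmdp, pgtable);

	/* ... when splitting or zapping it: take the page table back */
	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
	spin_unlock(ptl);

	pte_free(mm, pgtable);
}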

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	pmd_t old;

	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
	old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return old;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD
pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
			 pmd_t *pmdp)
{
	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
	return pmdp_invalidate(vma, address, pmdp);
}
#endif

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * pmd and hugepage pte format are same. So we could
	 * use the same function.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down ptes not pmd */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif

/* arch define pte_free_defer in asm/pgalloc.h for its own implementation */
#ifndef pte_free_defer
static void pte_free_now(struct rcu_head *head)
{
	struct page *page;

	page = container_of(head, struct page, rcu_head);
	pte_free(NULL /* mm not passed and not used */, (pgtable_t)page);
}

void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
	struct page *page;

	page = pgtable;
	call_rcu(&page->rcu_head, pte_free_now);
}
#endif /* pte_free_defer */
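
/*
 * Example (hypothetical sketch, not part of this file): a caller retracting
 * an empty page table from under a pmd cannot free the page immediately,
 * because a concurrent lockless walker may still hold a pointer to it under
 * RCU; pte_free_defer() postpones the free to after a grace period.  The
 * helper name and its surrounding locking context are illustrative only.
 */
static inline void example_retract_page_table(struct vm_area_struct *vma,
					      pmd_t *pmdp, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	pgtable_t pgtable = NULL;
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmdp);
	if (pmd_present(*pmdp) && !pmd_trans_huge(*pmdp)) {
		/* unhook the page table and flush its ptes from the TLB */
		pmd_t pmdval = pmdp_collapse_flush(vma, addr & HPAGE_PMD_MASK,
						   pmdp);
		pgtable = pmd_pgtable(pmdval);
	}
	spin_unlock(ptl);

	if (pgtable)
		pte_free_defer(mm, pgtable);	/* freed after a grace period */
}
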
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#if defined(CONFIG_GUP_GET_PXX_LOW_HIGH) && \
	(defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RCU))
/*
 * See the comment above ptep_get_lockless() in include/linux/pgtable.h:
 * on these configs the pmd is read in two 32-bit halves, and a concurrent
 * THP operation could otherwise change it between the two reads.  Disabling
 * interrupts holds off the TLB-flush IPI (or RCU grace period) which such an
 * operation must wait for before the page table can be freed or reused, so
 * the value read here stays coherent; GUP-fast relies on the same trick.
 */
static unsigned long pmdp_get_lockless_start(void)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	return irqflags;
}
static void pmdp_get_lockless_end(unsigned long irqflags)
{
	local_irq_restore(irqflags);
}
#else
static unsigned long pmdp_get_lockless_start(void) { return 0; }
static void pmdp_get_lockless_end(unsigned long irqflags) { }
#endif

pte_t *___pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp)
{
	unsigned long irqflags;
	pmd_t pmdval;

	rcu_read_lock();
	irqflags = pmdp_get_lockless_start();
	pmdval = pmdp_get_lockless(pmd);
	pmdp_get_lockless_end(irqflags);

	if (pmdvalp)
		*pmdvalp = pmdval;
	if (unlikely(pmd_none(pmdval) || is_pmd_migration_entry(pmdval)))
		goto nomap;
	if (unlikely(pmd_trans_huge(pmdval)))
		goto nomap;
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		goto nomap;
	}
	return __pte_map(&pmdval, addr);
nomap:
	rcu_read_unlock();
	return NULL;
}

pte_t *pte_offset_map_ro_nolock(struct mm_struct *mm, pmd_t *pmd,
				unsigned long addr, spinlock_t **ptlp)
{
	pmd_t pmdval;
	pte_t *pte;

	pte = __pte_offset_map(pmd, addr, &pmdval);
	if (likely(pte))
		*ptlp = pte_lockptr(mm, &pmdval);
	return pte;
}
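
/*
 * Example (hypothetical sketch): typical use of pte_offset_map_ro_nolock(),
 * mapping a pte and taking its lock only for the brief read.  The helper
 * name is illustrative only.
 */
static inline bool example_pte_is_present(struct mm_struct *mm, pmd_t *pmd,
					  unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte;
	bool ret;

	pte = pte_offset_map_ro_nolock(mm, pmd, addr, &ptl);
	if (!pte)
		return false;	/* no page table here */
	spin_lock(ptl);
	ret = pte_present(ptep_get(pte));
	spin_unlock(ptl);
	pte_unmap(pte);
	return ret;
}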

pte_t *pte_offset_map_rw_nolock(struct mm_struct *mm, pmd_t *pmd,
				unsigned long addr, pmd_t *pmdvalp,
				spinlock_t **ptlp)
{
	pte_t *pte;

	VM_WARN_ON_ONCE(!pmdvalp);
	pte = __pte_offset_map(pmd, addr, pmdvalp);
	if (likely(pte))
		*ptlp = pte_lockptr(mm, pmdvalp);
	return pte;
}
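
/*
 * Example (hypothetical sketch): a writer using pte_offset_map_rw_nolock()
 * must recheck, after taking the PTL, that the page table it mapped is still
 * the one linked at *pmd, since it could have been retracted meanwhile.  The
 * helper name and the modification made are illustrative only.
 */
static inline bool example_mark_pte_old(struct mm_struct *mm, pmd_t *pmd,
					unsigned long addr)
{
	spinlock_t *ptl;
	pmd_t pmdval;
	pte_t *pte, entry;

	pte = pte_offset_map_rw_nolock(mm, pmd, addr, &pmdval, &ptl);
	if (!pte)
		return false;
	spin_lock(ptl);
	if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
		pte_unmap_unlock(pte, ptl);
		return false;	/* page table changed under us: caller retries */
	}
	entry = ptep_get(pte);
	if (pte_present(entry))
		set_pte_at(mm, addr, pte, pte_mkold(entry));
	pte_unmap_unlock(pte, ptl);
	return true;
}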

/*
 * pte_offset_map_lock(mm, pmd, addr, ptlp), and its internal implementation
 * __pte_offset_map_lock() below, is usually called with the pmd pointer for
 * addr, reached by walking down the mm's pgd, p4d, pud for addr: either while
 * holding mmap_lock or vma lock for read or for write; or in truncate or rmap
 * context, holding file's i_mmap_lock or anon_vma lock for read (or for write).
 *
 * pte_offset_map(pmd, addr), and its internal helper ___pte_offset_map()
 * above, is the basic mapping primitive: it returns the pte pointer for addr,
 * mapped with the RCU read lock held, or NULL if no page table is there
 * (none, bad, huge or migration entry at *pmd).  The caller unmaps with
 * pte_unmap(), which also drops the RCU read lock.  Until then the page table
 * cannot be freed, but its contents may still change under the caller.
 *
 * pte_offset_map_ro_nolock(mm, pmd, addr, ptlp), above, is like
 * pte_offset_map(); but when successful, it also outputs a pointer to the
 * pte spinlock in *ptlp, without locking it.  The lock pointer matches the
 * page table actually mapped, so the caller avoids a later
 * pte_lockptr(mm, pmd), which might by then act on a changed *pmd.
 *
 * pte_offset_map_rw_nolock(mm, pmd, addr, pmdvalp, ptlp), above, is like the
 * ro variant, but additionally outputs the pmd value seen at map time in
 * *pmdvalp: before writing to the page table, the caller must lock *ptlp and
 * recheck pmd_same() against a fresh read of *pmd, to be sure the page table
 * has not been retracted meanwhile.
 *
 * pte_offset_map_lock(mm, pmd, addr, ptlp) is the commonly used form: it maps
 * the page table, takes its spinlock, and rechecks that the pmd still points
 * to it, retrying internally if not.  It returns NULL, with nothing mapped
 * and nothing locked, when there is no page table at *pmd.
 */
pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
			     unsigned long addr, spinlock_t **ptlp)
{
	spinlock_t *ptl;
	pmd_t pmdval;
	pte_t *pte;
again:
	pte = __pte_offset_map(pmd, addr, &pmdval);
	if (unlikely(!pte))
		return pte;
	ptl = pte_lockptr(mm, &pmdval);
	spin_lock(ptl);
	if (likely(pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	goto again;
}
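
/*
 * Example (hypothetical sketch): the usual caller pattern for
 * pte_offset_map_lock(), walking all ptes mapped by one pmd.  The helper
 * name is illustrative only.
 */
static inline int example_count_present_ptes(struct mm_struct *mm, pmd_t *pmd,
					     unsigned long addr,
					     unsigned long end)
{
	spinlock_t *ptl;
	pte_t *pte;
	int count = 0;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return 0;	/* no page table here: nothing to count */
	do {
		if (pte_present(ptep_get(pte)))
			count++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
	return count;
}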