#include <linux/mm.h>
#include <linux/gfp.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

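/*
 * Allocate one page-table page. The _kernel variant hands back a
 * kernel virtual address; the user variant returns a struct page and
 * runs pgtable_page_ctor() for split-lock setup and accounting, and
 * may allocate from highmem when CONFIG_HIGHPTE is enabled.
 */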
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(PGALLOC_GFP);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(__userpte_alloc_gfp, 0);
	if (!pte)
		return NULL;
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}

static int __init setup_userpte(char *arg)
{
	if (!arg)
		return -EINVAL;

	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
	else
		return -EINVAL;
	return 0;
}
early_param("userpte", setup_userpte);

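/*
 * Tear down a pte page: run the destructor, tell the paravirt layer
 * the pfn no longer backs a page table, then hand the page to the
 * mmu_gather so it is only freed after the TLB has been flushed.
 */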
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

#if PAGETABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	struct page *page = virt_to_page(pmd);
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	/*
	 * NOTE! For PAE, any changes to the top page-directory-pointer-
	 * table entries need a full cr3 reload to flush.
	 */
#ifdef CONFIG_X86_PAE
	tlb->need_flush_all = 1;
#endif
	pgtable_pmd_page_dtor(page);
	tlb_remove_page(tlb, page);
}

#if PAGETABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* PAGETABLE_LEVELS > 2 */

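/*
 * Every pgd page is kept on pgd_list (via its struct page's lru) so
 * that changes to the kernel portion of the address space can be
 * propagated to all page tables. The list is protected by pgd_lock.
 */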
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD	\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)

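/*
 * A pgd page is never mapped into userspace, so its struct page's
 * index field is free; stash the owning mm there so that
 * pgd_page_get_mm() can recover the mm from a pgd page when walking
 * pgd_list.
 */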
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
	virt_to_page(pgd)->index = (pgoff_t)mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
	return (struct mm_struct *)page->index;
}

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
	/*
	 * If the pgd points to a shared pagetable level (either the
	 * ptes in non-PAE, or shared PMD in PAE), then just copy the
	 * references from swapper_pg_dir.
	 */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    PAGETABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD) {
		pgd_set_mm(pgd, mm);
		pgd_list_add(pgd);
	}
}

static void pgd_dtor(pgd_t *pgd)
{
	if (SHARED_KERNEL_PMD)
		return;

	spin_lock(&pgd_lock);
	pgd_list_del(pgd);
	spin_unlock(&pgd_lock);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (= TLB flush) when updating
 * the top-level pagetable entries, to guarantee the processor notices
 * the update. Since this is expensive, and all four top-level entries
 * are used almost immediately in a new process's life, we just
 * pre-populate them here.
 *
 * Also, if the kernel pmd is not shared between pagetables
 * (!SHARED_KERNEL_PMD), we need to allocate and initialize the kernel
 * pmds here as well.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/*
	 * Note: almost everything apart from _PAGE_PRESENT is
	 * reserved at the pmd (PDPT) level.
	 */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel's "TLBs, Paging-Structure Caches, and
	 * Their Invalidation" application note (April 2007, document
	 * 317080-001, section 8.1): in PAE mode we explicitly have to
	 * flush the TLB via cr3 if the top-level pgd is changed.
	 */
	flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */

static void free_pmds(pmd_t *pmds[])
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		if (pmds[i]) {
			pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
			free_page((unsigned long)pmds[i]);
		}
}

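/*
 * Allocate all PREALLOCATED_PMDS pmd pages up front. On any failure
 * the ones that did succeed are freed again and -ENOMEM is returned,
 * so pgd_alloc() can unwind cleanly.
 */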
static int preallocate_pmds(pmd_t *pmds[])
{
	int i;
	bool failed = false;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
		if (!pmd)
			failed = true;
		if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
			free_page((unsigned long)pmd);
			pmd = NULL;
			failed = true;
		}
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(pmds);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocated which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}

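/*
 * Wire the preallocated pmds into the new pgd. Entries at or above
 * KERNEL_PGD_BOUNDARY are seeded from swapper_pg_dir first, so the
 * kernel mappings are visible through this pgd as well.
 */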
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	pud_t *pud;
	int i;

	if (PREALLOCATED_PMDS == 0)
		return;

	pud = pud_offset(pgd, 0);

	for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *pmds[PREALLOCATED_PMDS];

	pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(pmds) != 0)
		goto out_free_pgd;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_pmds;

	/*
	 * Make sure that the pre-populating of the pmds is atomic with
	 * respect to anything walking the pgd_list, so that the pgd
	 * walkers never see a partially populated pgd.
	 */
	spin_lock(&pgd_lock);

	pgd_ctor(mm, pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock(&pgd_lock);

	return pgd;

out_free_pmds:
	free_pmds(pmds);
out_free_pgd:
	free_page((unsigned long)pgd);
out:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	free_page((unsigned long)pgd);
}

/*
 * Used to set accessed or dirty bits in the page table entries
 * on other architectures. On x86, the accessed and dirty bits
 * are tracked by hardware. However, do_wp_page calls this function
 * to also make the pte writeable at the same time the dirty bit is
 * set. In that case we do actually need to write the PTE.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update_defer(vma->vm_mm, address, ptep);
	}

	return changed;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (changed && dirty) {
		*pmdp = entry;
		pmd_update_defer(vma->vm_mm, address, pmdp);
		/*
		 * We had a write-protection fault here and changed the
		 * pmd to be more permissive. There is no need to flush
		 * the TLB for that; #PF is architecturally guaranteed
		 * to do so, and in the worst case we'll take a
		 * spurious fault.
		 */
	}

	return changed;
}
#endif

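/*
 * Atomically clear the accessed bit and report whether it was set;
 * pte_update() notifies paravirt backends of the change.
 */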
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pmd_t *pmdp)
{
	int ret = 0;

	if (pmd_young(*pmdp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pmdp);

	if (ret)
		pmd_update(vma->vm_mm, addr, pmdp);

	return ret;
}
#endif

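/*
 * As above, but also flush the TLB entry whenever the accessed bit
 * was actually cleared, so that later references must walk the page
 * tables again and mark the entry young.
 */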
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;

	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);

	return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	return young;
}

void pmdp_splitting_flush(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp)
{
	int set;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	set = !test_and_set_bit(_PAGE_BIT_SPLITTING,
				(unsigned long *)pmdp);
	if (set) {
		pmd_update(vma->vm_mm, address, pmdp);
		/* need tlb flush only to serialize against gup-fast */
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
}
#endif

/**
 * reserve_top_address - reserve a hole at the top of the kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of the kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
	       (int)-reserve);
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
#endif
}

int fixmaps_set;

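/*
 * Install a pte for a fixmap slot. The index is checked against
 * __end_of_fixed_addresses, and fixmaps_set counts installed slots so
 * that reserve_top_address() can refuse to move the fixmap once any
 * slot is in use.
 */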
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
		       pgprot_t flags)
{
	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}