#include <linux/mm.h>
#include <linux/gfp.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/mtrr.h>

#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | __GFP_ZERO)

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        return (pte_t *)__get_free_page(PGALLOC_GFP & ~__GFP_ACCOUNT);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

        pte = alloc_pages(__userpte_alloc_gfp, 0);
        if (!pte)
                return NULL;
        if (!pgtable_page_ctor(pte)) {
                __free_page(pte);
                return NULL;
        }
        return pte;
}

static int __init setup_userpte(char *arg)
{
        if (!arg)
                return -EINVAL;
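
        /*
         * "userpte=nohigh" disables allocation of user pagetables in
         * high memory.
         */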
        if (strcmp(arg, "nohigh") == 0)
                __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
        else
                return -EINVAL;
        return 0;
}
early_param("userpte", setup_userpte);

void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
        pgtable_page_dtor(pte);
        paravirt_release_pte(page_to_pfn(pte));
        tlb_remove_page(tlb, pte);
}

#if CONFIG_PGTABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
        struct page *page = virt_to_page(pmd);
        paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
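        /*
         * NOTE! For PAE, any changes to the top page-directory-pointer-table
         * entries need a full cr3 reload to flush.
         */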
#ifdef CONFIG_X86_PAE
        tlb->need_flush_all = 1;
#endif
        pgtable_pmd_page_dtor(page);
        tlb_remove_page(tlb, page);
}

#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
        paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
        tlb_remove_page(tlb, virt_to_page(pud));
}
#endif  /* CONFIG_PGTABLE_LEVELS > 3 */
#endif  /* CONFIG_PGTABLE_LEVELS > 2 */

static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD \
        (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
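
/*
 * A pgd page records its owning mm in page->index so that walkers of
 * pgd_list can map a pgd page back to its mm (via pgd_page_get_mm()).
 */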
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
        BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
        virt_to_page(pgd)->index = (pgoff_t)mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
        return (struct mm_struct *)page->index;
}

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
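        /*
         * If the pgd points to a shared pagetable level (either the
         * ptes in non-PAE, or shared PMD in PAE), then just copy the
         * references from swapper_pg_dir.
         */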
        if (CONFIG_PGTABLE_LEVELS == 2 ||
            (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
            CONFIG_PGTABLE_LEVELS == 4) {
                clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
                                swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                                KERNEL_PGD_PTRS);
        }
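
        /* list required to sync kernel mapping updates */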
        if (!SHARED_KERNEL_PMD) {
                pgd_set_mm(pgd, mm);
                pgd_list_add(pgd);
        }
}

static void pgd_dtor(pgd_t *pgd)
{
        if (SHARED_KERNEL_PMD)
                return;

        spin_lock(&pgd_lock);
        pgd_list_del(pgd);
        spin_unlock(&pgd_lock);
}
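
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- nyc
 */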
#ifdef CONFIG_X86_PAE
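/*
 * In PAE mode, we need to do a cr3 reload (= tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */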
#define PREALLOCATED_PMDS       UNSHARED_PTRS_PER_PGD

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
        paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
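
        /*
         * Note: almost everything apart from _PAGE_PRESENT is
         * reserved at the pmd (PDPT) level.
         */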
        set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));
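
        /*
         * According to Intel App note "TLBs, Paging-Structure Caches,
         * and Their Invalidation", April 2007, document 317080-001,
         * section 8.1: in PAE mode we explicitly have to flush the
         * TLB via cr3 if the top-level pgd is changed...
         */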
        flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */
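
/* No need to prepopulate any pagetable entries in non-PAE modes. */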
#define PREALLOCATED_PMDS       0

#endif  /* CONFIG_X86_PAE */

static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
        int i;

        for (i = 0; i < PREALLOCATED_PMDS; i++)
                if (pmds[i]) {
                        pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
                        free_page((unsigned long)pmds[i]);
                        mm_dec_nr_pmds(mm);
                }
}

static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
        int i;
        bool failed = false;
        gfp_t gfp = PGALLOC_GFP;

        if (mm == &init_mm)
                gfp &= ~__GFP_ACCOUNT;

        for (i = 0; i < PREALLOCATED_PMDS; i++) {
                pmd_t *pmd = (pmd_t *)__get_free_page(gfp);
                if (!pmd)
                        failed = true;
                if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
                        free_page((unsigned long)pmd);
                        pmd = NULL;
                        failed = true;
                }
                if (pmd)
                        mm_inc_nr_pmds(mm);
                pmds[i] = pmd;
        }

        if (failed) {
                free_pmds(mm, pmds);
                return -ENOMEM;
        }

        return 0;
}
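
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */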
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
        int i;

        for (i = 0; i < PREALLOCATED_PMDS; i++) {
                pgd_t pgd = pgdp[i];

                if (pgd_val(pgd) != 0) {
                        pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

                        pgdp[i] = native_make_pgd(0);

                        paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
                        pmd_free(mm, pmd);
                        mm_dec_nr_pmds(mm);
                }
        }
}

static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
        pud_t *pud;
        int i;

        if (PREALLOCATED_PMDS == 0)
                return;

        pud = pud_offset(pgd, 0);

        for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
                pmd_t *pmd = pmds[i];

                if (i >= KERNEL_PGD_BOUNDARY)
                        memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
                               sizeof(pmd_t) * PTRS_PER_PMD);

                pud_populate(mm, pud, pmd);
        }
}
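
/*
 * Xen paravirt assumes pgd table should be in one page. 64 bit kernel also
 * assumes that pgd should be in one page.
 *
 * But kernel with PAE paging that is not running as a Xen domain
 * only needs to allocate 32 bytes for pgd instead of one page.
 */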
#ifdef CONFIG_X86_PAE

#include <linux/slab.h>

#define PGD_SIZE        (PTRS_PER_PGD * sizeof(pgd_t))
#define PGD_ALIGN       32

static struct kmem_cache *pgd_cache;

static int __init pgd_cache_init(void)
{
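        /*
         * When a PAE kernel is running as a Xen domain, it does not use
         * the shared kernel pmd, which requires a whole page for the pgd.
         */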
        if (!SHARED_KERNEL_PMD)
                return 0;
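
        /*
         * When a PAE kernel is not running as a Xen domain, it uses the
         * shared kernel pmd, which does not require a whole page for the
         * pgd: a 32-byte object is enough, so create a 32-byte slab cache
         * for pgd allocation at boot time.
         */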
        pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
                                      SLAB_PANIC, NULL);
        if (!pgd_cache)
                return -ENOMEM;

        return 0;
}
core_initcall(pgd_cache_init);

static inline pgd_t *_pgd_alloc(void)
{
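        /*
         * If no SHARED_KERNEL_PMD, the PAE kernel is running as a Xen
         * domain and needs a whole page for the pgd.
         */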
        if (!SHARED_KERNEL_PMD)
                return (pgd_t *)__get_free_page(PGALLOC_GFP);
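
        /*
         * Otherwise the kernel pmd is shared and a 32-byte slab object
         * is enough for the pgd, saving memory.
         */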
        return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
}

static inline void _pgd_free(pgd_t *pgd)
{
        if (!SHARED_KERNEL_PMD)
                free_page((unsigned long)pgd);
        else
                kmem_cache_free(pgd_cache, pgd);
}
#else  /* !CONFIG_X86_PAE */
static inline pgd_t *_pgd_alloc(void)
{
        return (pgd_t *)__get_free_page(PGALLOC_GFP);
}

static inline void _pgd_free(pgd_t *pgd)
{
        free_page((unsigned long)pgd);
}
#endif  /* CONFIG_X86_PAE */

pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd;
        pmd_t *pmds[PREALLOCATED_PMDS];

        pgd = _pgd_alloc();

        if (pgd == NULL)
                goto out;

        mm->pgd = pgd;

        if (preallocate_pmds(mm, pmds) != 0)
                goto out_free_pgd;

        if (paravirt_pgd_alloc(mm) != 0)
                goto out_free_pmds;
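
        /*
         * Make sure that pre-populating the pmds is atomic with
         * respect to anything walking the pgd_list, so that they
         * never see a partially populated pgd.
         */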
        spin_lock(&pgd_lock);

        pgd_ctor(mm, pgd);
        pgd_prepopulate_pmd(mm, pgd, pmds);

        spin_unlock(&pgd_lock);

        return pgd;

out_free_pmds:
        free_pmds(mm, pmds);
out_free_pgd:
        _pgd_free(pgd);
out:
        return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        pgd_mop_up_pmds(mm, pgd);
        pgd_dtor(pgd);
        paravirt_pgd_free(mm, pgd);
        _pgd_free(pgd);
}
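
/*
 * Used to set accessed or dirty bits in the page table entries
 * on other architectures. On x86, the accessed and dirty bits
 * are tracked by hardware. However, do_wp_page calls this function
 * to also make the pte writeable. This works because do_wp_page
 * sets _PAGE_RW and _PAGE_DIRTY at the same time.
 */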
int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
{
        int changed = !pte_same(*ptep, entry);

        if (changed && dirty) {
                *ptep = entry;
                pte_update(vma->vm_mm, address, ptep);
        }

        return changed;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp,
                          pmd_t entry, int dirty)
{
        int changed = !pmd_same(*pmdp, entry);

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);

        if (changed && dirty) {
                *pmdp = entry;
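                /*
                 * We had a write-protection fault here and changed the
                 * pmd to more permissive. No need to flush the TLB for
                 * that, #PF is architecturally guaranteed to do that and
                 * in the worst-case we'll generate a spurious fault.
                 */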
        }

        return changed;
}
#endif

int ptep_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long addr, pte_t *ptep)
{
        int ret = 0;

        if (pte_young(*ptep))
                ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
                                         (unsigned long *) &ptep->pte);

        if (ret)
                pte_update(vma->vm_mm, addr, ptep);

        return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long addr, pmd_t *pmdp)
{
        int ret = 0;

        if (pmd_young(*pmdp))
                ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
                                         (unsigned long *)pmdp);

        return ret;
}
#endif

int ptep_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pte_t *ptep)
{
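        /*
         * On x86 CPUs, clearing the accessed bit without a TLB flush
         * doesn't cause data corruption. [ It could cause incorrect
         * page aging and the (mistaken) reclaim of hot pages, but the
         * chance of that should be relatively low. ]
         *
         * So as a performance optimization don't flush the TLB when
         * clearing the accessed bit, it will eventually be flushed by
         * a context switch or a VM operation anyway. [ In the rare
         * event of it not getting flushed for a long time the delay
         * shouldn't really matter because there's no real memory
         * pressure for swapout to react to. ]
         */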
        return ptep_test_and_clear_young(vma, address, ptep);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pmd_t *pmdp)
{
        int young;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);

        young = pmdp_test_and_clear_young(vma, address, pmdp);
        if (young)
                flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

        return young;
}
#endif
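
/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */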
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
        BUG_ON(fixmaps_set > 0);
        __FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
        printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n",
               -reserve, __FIXADDR_TOP + PAGE_SIZE);
#endif
}

int fixmaps_set;

void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        set_pte_vaddr(address, pte);
        fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
                       pgprot_t flags)
{
        __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
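/**
 * pud_set_huge - setup kernel PUD mapping
 *
 * MTRRs can override PAT memory types with 4KiB granularity. Therefore,
 * this function sets up a huge page only if any of the following
 * conditions are met:
 *
 * - MTRRs are disabled, or
 *
 * - MTRRs are enabled and the range is completely covered by a single MTRR, or
 *
 * - MTRRs are enabled and the corresponding MTRR memory type is WB, which
 *   has no effect on the requested PAT memory type.
 *
 * Returns 1 on success and 0 on failure (no PUD map is used).
 */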
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
        u8 mtrr, uniform;

        mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
        if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
            (mtrr != MTRR_TYPE_WRBACK))
                return 0;

        prot = pgprot_4k_2_large(prot);

        set_pte((pte_t *)pud, pfn_pte(
                (u64)addr >> PAGE_SHIFT,
                __pgprot(pgprot_val(prot) | _PAGE_PSE)));

        return 1;
}
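
/**
 * pmd_set_huge - setup kernel PMD mapping
 *
 * See text over pud_set_huge() above.
 *
 * Returns 1 on success and 0 on failure.
 */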
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
        u8 mtrr, uniform;

        mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
        if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
            (mtrr != MTRR_TYPE_WRBACK)) {
                pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
                             __func__, addr, addr + PMD_SIZE);
                return 0;
        }

        prot = pgprot_4k_2_large(prot);

        set_pte((pte_t *)pmd, pfn_pte(
                (u64)addr >> PAGE_SHIFT,
                __pgprot(pgprot_val(prot) | _PAGE_PSE)));

        return 1;
}
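
/**
 * pud_clear_huge - clear kernel PUD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PUD map is used).
 */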
int pud_clear_huge(pud_t *pud)
{
        if (pud_large(*pud)) {
                pud_clear(pud);
                return 1;
        }

        return 0;
}
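
/**
 * pmd_clear_huge - clear kernel PMD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PMD map is used).
 */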
int pmd_clear_huge(pmd_t *pmd)
{
        if (pmd_large(*pmd)) {
                pmd_clear(pmd);
                return 1;
        }

        return 0;
}
#endif  /* CONFIG_HAVE_ARCH_HUGE_VMAP */