// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/mtrr.h>

#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
phys_addr_t physical_mask __ro_after_init = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
EXPORT_SYMBOL(physical_mask);
#endif

#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

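/*
 * Kernel page-table pages are never allocated from highmem and are not
 * charged to a memory cgroup, hence the cleared __GFP_ACCOUNT below.
 */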
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(PGALLOC_GFP & ~__GFP_ACCOUNT);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(__userpte_alloc_gfp, 0);
	if (!pte)
		return NULL;
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}

static int __init setup_userpte(char *arg)
{
	if (!arg)
		return -EINVAL;

	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
	else
		return -EINVAL;
	return 0;
}
early_param("userpte", setup_userpte);

void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	paravirt_tlb_remove_table(tlb, pte);
}

#if CONFIG_PGTABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	struct page *page = virt_to_page(pmd);
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	/*
	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
	 * entries need a full cr3 reload to flush.
	 */
#ifdef CONFIG_X86_PAE
	tlb->need_flush_all = 1;
#endif
	pgtable_pmd_page_dtor(page);
	paravirt_tlb_remove_table(tlb, page);
}

#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	paravirt_tlb_remove_table(tlb, virt_to_page(pud));
}

#if CONFIG_PGTABLE_LEVELS > 4
void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d)
{
	paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT);
	paravirt_tlb_remove_table(tlb, virt_to_page(p4d));
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */
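
/*
 * All pgds are linked on pgd_list through page->lru so that updates to
 * the kernel portion of the page tables can be propagated to every
 * process; the list is protected by pgd_lock.
 */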
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}
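
/*
 * When the kernel PMDs are shared (SHARED_KERNEL_PMD), only the user
 * portion of the pgd (below KERNEL_PGD_BOUNDARY) is private to each
 * page table; otherwise every pgd entry is per-process.
 */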
115
116#define UNSHARED_PTRS_PER_PGD \
117 (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
118#define MAX_UNSHARED_PTRS_PER_PGD \
119 max_t(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD)
120
121
122static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
123{
124 virt_to_page(pgd)->pt_mm = mm;
125}
126
127struct mm_struct *pgd_page_get_mm(struct page *page)
128{
129 return page->pt_mm;
130}
131
static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (CONFIG_PGTABLE_LEVELS == 2 ||
	    (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    CONFIG_PGTABLE_LEVELS >= 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD) {
		pgd_set_mm(pgd, mm);
		pgd_list_add(pgd);
	}
}

static void pgd_dtor(pgd_t *pgd)
{
	if (SHARED_KERNEL_PMD)
		return;

	spin_lock(&pgd_lock);
	pgd_list_del(pgd);
	spin_unlock(&pgd_lock);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- nyc
 */
#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD
#define MAX_PREALLOCATED_PMDS	MAX_UNSHARED_PTRS_PER_PGD

/*
 * We allocate separate PMDs for the kernel part of the user page-table
 * when PTI is enabled. We need them to map the per-process LDT into the
 * user-space page-table.
 */
#define PREALLOCATED_USER_PMDS	(static_cpu_has(X86_FEATURE_PTI) ? \
					KERNEL_PGD_PTRS : 0)
#define MAX_PREALLOCATED_USER_PMDS	KERNEL_PGD_PTRS

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed.
	 */
	flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0
#define MAX_PREALLOCATED_PMDS	0
#define PREALLOCATED_USER_PMDS	0
#define MAX_PREALLOCATED_USER_PMDS	0
#endif	/* CONFIG_X86_PAE */

static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (pmds[i]) {
			pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
			free_page((unsigned long)pmds[i]);
			mm_dec_nr_pmds(mm);
		}
}
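
/*
 * Allocate @count zeroed PMD pages into @pmds[]. The allocation is
 * all-or-nothing: on any failure every page allocated so far is freed
 * again and -ENOMEM is returned.
 */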
static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{
	int i;
	bool failed = false;
	gfp_t gfp = PGALLOC_GFP;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;

	for (i = 0; i < count; i++) {
		pmd_t *pmd = (pmd_t *)__get_free_page(gfp);
		if (!pmd)
			failed = true;
		if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
			free_page((unsigned long)pmd);
			pmd = NULL;
			failed = true;
		}
		if (pmd)
			mm_inc_nr_pmds(mm);
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(mm, pmds, count);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
{
	pgd_t pgd = *pgdp;

	if (pgd_val(pgd) != 0) {
		pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

		pgd_clear(pgdp);

		paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
		pmd_free(mm, pmd);
		mm_dec_nr_pmds(mm);
	}
}

static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		mop_up_one_pmd(mm, &pgdp[i]);

#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	pgdp = kernel_to_user_pgdp(pgdp);

	for (i = 0; i < PREALLOCATED_USER_PMDS; i++)
		mop_up_one_pmd(mm, &pgdp[i + KERNEL_PGD_BOUNDARY]);
#endif
}
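
/*
 * Install the preallocated pmd pages under the pgd: user entries start
 * out empty (the pmds were zero-allocated), while entries at or above
 * KERNEL_PGD_BOUNDARY first get a copy of the kernel pmds from
 * swapper_pg_dir.
 */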
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	p4d_t *p4d;
	pud_t *pud;
	int i;

	if (PREALLOCATED_PMDS == 0)
		return;

	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);

	for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}

#ifdef CONFIG_PAGE_TABLE_ISOLATION
static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
				     pgd_t *k_pgd, pmd_t *pmds[])
{
	pgd_t *s_pgd = kernel_to_user_pgdp(swapper_pg_dir);
	pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
	p4d_t *u_p4d;
	pud_t *u_pud;
	int i;

	u_p4d = p4d_offset(u_pgd, 0);
	u_pud = pud_offset(u_p4d, 0);

	s_pgd += KERNEL_PGD_BOUNDARY;
	u_pud += KERNEL_PGD_BOUNDARY;

	for (i = 0; i < PREALLOCATED_USER_PMDS; i++, u_pud++, s_pgd++) {
		pmd_t *pmd = pmds[i];

		memcpy(pmd, (pmd_t *)pgd_page_vaddr(*s_pgd),
		       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, u_pud, pmd);
	}
}
#else
static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
				     pgd_t *k_pgd, pmd_t *pmds[])
{
}
#endif

/*
 * Xen paravirt assumes pgd table should be in one page. 64 bit kernel also
 * assumes that pgd should be in one page.
 *
 * But kernel with PAE paging that is not running as a Xen domain
 * only needs to allocate 32 bytes for pgd instead of one page.
 */
#ifdef CONFIG_X86_PAE

#include <linux/slab.h>

#define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#define PGD_ALIGN	32

static struct kmem_cache *pgd_cache;

static int __init pgd_cache_init(void)
{
	/*
	 * When PAE kernel is running as a Xen domain, it does not use
	 * shared kernel pmd. And this requires a whole page for pgd.
	 */
	if (!SHARED_KERNEL_PMD)
		return 0;

	/*
	 * When PAE kernel is not running as a Xen domain, it uses
	 * shared kernel pmd. Shared kernel pmd does not require a whole
	 * page for pgd. We are able to just allocate a 32-byte slab
	 * for each pgd, so create that cache at boot time.
	 */
	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
				      SLAB_PANIC, NULL);
	return 0;
}
core_initcall(pgd_cache_init);

static inline pgd_t *_pgd_alloc(void)
{
	/*
	 * If no SHARED_KERNEL_PMD, PAE kernel is running as a Xen
	 * domain. We allocate one page for pgd.
	 */
	if (!SHARED_KERNEL_PMD)
		return (pgd_t *)__get_free_pages(PGALLOC_GFP,
						 PGD_ALLOCATION_ORDER);

	/*
	 * Now PAE kernel is not running as a Xen domain. We can allocate
	 * a 32-byte slab for pgd to save memory space.
	 */
	return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
}

static inline void _pgd_free(pgd_t *pgd)
{
	if (!SHARED_KERNEL_PMD)
		free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
	else
		kmem_cache_free(pgd_cache, pgd);
}
#else	/* !CONFIG_X86_PAE */

static inline pgd_t *_pgd_alloc(void)
{
	return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER);
}

static inline void _pgd_free(pgd_t *pgd)
{
	free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
}
#endif	/* CONFIG_X86_PAE */
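
/*
 * Allocate and initialize a new pgd for @mm: the kernel entries are
 * cloned from swapper_pg_dir by pgd_ctor() and any pmds required for
 * PAE or PTI are preallocated, so the new page table is fully usable
 * once this returns. Paired with pgd_free() when the mm is torn down.
 */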
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS];
	pmd_t *pmds[MAX_PREALLOCATED_PMDS];

	pgd = _pgd_alloc();

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(mm, pmds, PREALLOCATED_PMDS) != 0)
		goto out_free_pgd;

	if (preallocate_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS) != 0)
		goto out_free_pmds;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_user_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock(&pgd_lock);

	pgd_ctor(mm, pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);
	pgd_prepopulate_user_pmd(mm, pgd, u_pmds);

	spin_unlock(&pgd_lock);

	return pgd;

out_free_user_pmds:
	free_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS);
out_free_pmds:
	free_pmds(mm, pmds, PREALLOCATED_PMDS);
out_free_pgd:
	_pgd_free(pgd);
out:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	_pgd_free(pgd);
}

/*
 * Update the accessed/dirty flags in a page table entry. The entry is
 * only rewritten when it actually changed and the fault was a write
 * fault; the return value tells the caller whether the entry changed,
 * so it can decide whether a TLB flush is needed.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty)
		set_pte(ptep, entry);

	return changed;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (changed && dirty) {
		set_pmd(pmdp, entry);
		/*
		 * We had a write-protection fault here and changed the pmd
		 * to be more permissive. No need to flush the TLB for that,
		 * #PF is architecturally guaranteed to do that and in the
		 * worst-case we'll generate a spurious fault.
		 */
	}

	return changed;
}

int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pud_t *pudp, pud_t entry, int dirty)
{
	int changed = !pud_same(*pudp, entry);

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);

	if (changed && dirty) {
		set_pud(pudp, entry);
		/*
		 * We had a write-protection fault here and changed the pud
		 * to be more permissive. No need to flush the TLB for that,
		 * #PF is architecturally guaranteed to do that and in the
		 * worst-case we'll generate a spurious fault.
		 */
	}

	return changed;
}
#endif

int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pmd_t *pmdp)
{
	int ret = 0;

	if (pmd_young(*pmdp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pmdp);

	return ret;
}

int pudp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pud_t *pudp)
{
	int ret = 0;

	if (pud_young(*pudp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pudp);

	return ret;
}
#endif

int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	/*
	 * On x86 CPUs, clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	return young;
}
#endif

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve - size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	__FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
	printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n",
	       -reserve, __FIXADDR_TOP + PAGE_SIZE);
#endif
}

int fixmaps_set;

void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

#ifdef CONFIG_X86_64
	/*
	 * Ensure that the static initial page tables are covering the
	 * fixmap completely.
	 */
	BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
		     (FIXMAP_PMD_NUM * PTRS_PER_PTE));
#endif

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
		       pgprot_t flags)
{
	/* Sanitize 'prot' against any unsupported bits: */
	pgprot_val(flags) &= __default_kernel_pte_mask;

	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
#ifdef CONFIG_X86_5LEVEL
/**
 * p4d_set_huge - setup kernel P4D mapping
 *
 * No 512GB pages yet -- always return 0
 */
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}

/**
 * p4d_clear_huge - clear kernel P4D mapping when it is set
 *
 * No 512GB pages yet -- always return 0
 */
int p4d_clear_huge(p4d_t *p4d)
{
	return 0;
}
#endif

/**
 * pud_set_huge - setup kernel PUD mapping
 *
 * MTRRs can override PAT memory types with 4KiB granularity. Therefore, this
 * function sets up a huge page only if any of the following conditions are met:
 *
 * - MTRRs are disabled, or
 *
 * - MTRRs are enabled and the range is completely covered by a single MTRR, or
 *
 * - MTRRs are enabled and the corresponding MTRR memory type is WB, which
 *   has no effect on the requested PAT memory type.
 *
 * Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger
 * page mapping attempt fails.
 *
 * Returns 1 on success and 0 on failure.
 */
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	u8 mtrr, uniform;

	mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
	if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
	    (mtrr != MTRR_TYPE_WRBACK))
		return 0;

	/* Bail out if we are on a populated non-leaf entry: */
	if (pud_present(*pud) && !pud_huge(*pud))
		return 0;

	prot = pgprot_4k_2_large(prot);

	set_pte((pte_t *)pud, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(pgprot_val(prot) | _PAGE_PSE)));

	return 1;
}

/**
 * pmd_set_huge - setup kernel PMD mapping
 *
 * See text over pud_set_huge() above.
 *
 * Returns 1 on success and 0 on failure.
 */
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	u8 mtrr, uniform;

	mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
	if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
	    (mtrr != MTRR_TYPE_WRBACK)) {
		pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
			     __func__, addr, addr + PMD_SIZE);
		return 0;
	}

	/* Bail out if we are on a populated non-leaf entry: */
	if (pmd_present(*pmd) && !pmd_huge(*pmd))
		return 0;

	prot = pgprot_4k_2_large(prot);

	set_pte((pte_t *)pmd, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(pgprot_val(prot) | _PAGE_PSE)));

	return 1;
}

/**
 * pud_clear_huge - clear kernel PUD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PUD map is found).
 */
int pud_clear_huge(pud_t *pud)
{
	if (pud_large(*pud)) {
		pud_clear(pud);
		return 1;
	}

	return 0;
}

/**
 * pmd_clear_huge - clear kernel PMD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PMD map is found).
 */
int pmd_clear_huge(pmd_t *pmd)
{
	if (pmd_large(*pmd)) {
		pmd_clear(pmd);
		return 1;
	}

	return 0;
}

#ifdef CONFIG_X86_64
/**
 * pud_free_pmd_page - Clear pud entry and free pmd page.
 * @pud: Pointer to a PUD.
 * @addr: Virtual address associated with pud.
 *
 * Context: The pud range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 *
 * NOTE: Callers must allow a single page allocation.
 */
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	pmd_t *pmd, *pmd_sv;
	pte_t *pte;
	int i;

	if (pud_none(*pud))
		return 1;

	pmd = (pmd_t *)pud_page_vaddr(*pud);
	pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
	if (!pmd_sv)
		return 0;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd_sv[i] = pmd[i];
		if (!pmd_none(pmd[i]))
			pmd_clear(&pmd[i]);
	}

	pud_clear(pud);

	/* INVLPG to clear all paging-structure caches */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);

	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(pmd_sv[i])) {
			pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
			free_page((unsigned long)pte);
		}
	}

	free_page((unsigned long)pmd_sv);
	free_page((unsigned long)pmd);

	return 1;
}

/**
 * pmd_free_pte_page - Clear pmd entry and free pte page.
 * @pmd: Pointer to a PMD.
 * @addr: Virtual address associated with pmd.
 *
 * Context: The pmd range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 */
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;

	if (pmd_none(*pmd))
		return 1;

	pte = (pte_t *)pmd_page_vaddr(*pmd);
	pmd_clear(pmd);

	/* INVLPG to clear all paging-structure caches */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);

	free_page((unsigned long)pte);

	return 1;
}

#else /* !CONFIG_X86_64 */

int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	return pud_none(*pud);
}

/*
 * Disable free page handling on x86-PAE. This assures that ioremap()
 * does not update sync'd pmd entries. See vmalloc_sync_one().
 */
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	return pmd_none(*pmd);
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */