/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#include <linux/pfn.h>

#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU

#include <linux/mm_types.h>
#include <linux/bug.h>
#include <linux/errno.h>
#include <asm-generic/pgtable_uffd.h>

#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
	defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
#endif

/*
 * On almost all architectures and configurations, 0 can be used as the
 * upper ceiling to free_pgtables(): on many architectures it has the same
 * effect as using TASK_SIZE.  On a few (notably ARM), 0 causes a problem,
 * so those architectures override this value.
 */
#ifndef USER_PGTABLES_CEILING
#define USER_PGTABLES_CEILING	0UL
#endif

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);
#else
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	BUILD_BUG();
	return 0;
}
static inline int pudp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pud_t *pudp,
					pud_t entry, int dirty)
{
	BUILD_BUG();
	return 0;
}
#endif
#endif

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	pte_t pte = *ptep;
	int r = 1;
	if (!pte_young(pte))
		r = 0;
	else
		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
	return r;
}
#endif
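
/*
 * Illustrative sketch (not part of this header): reference-bit harvesting
 * in rmap walks the mapping under the pte lock in roughly this shape
 * (cf. page_referenced_one() in mm/rmap.c, which uses the _notify
 * variants to keep MMU notifiers in sync):
 *
 *	pte_t *pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 *	if (ptep_test_and_clear_young(vma, address, pte))
 *		referenced = true;	// pte was young; it is now marked old
 *	pte_unmap_unlock(pte, ptl);
 */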

#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	int r = 1;
	if (!pmd_young(pmd))
		r = 0;
	else
		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
	return r;
}
#else
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	BUILD_BUG();
	return 0;
}
#endif
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);
#else
/*
 * Despite being relevant to THP only, this stub is called from generic
 * rmap code under PageTransHuge(), hence a dummy implementation is
 * needed for !THP builds; it must never actually run.
 */
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pmd_t *pmdp)
{
	BUILD_BUG();
	return 0;
}
#endif
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(mm, address, ptep);
	return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	pmd_clear(pmdp);
	return pmd;
}
#endif
#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address,
					    pud_t *pudp)
{
	pud_t pud = *pudp;

	pud_clear(pudp);
	return pud;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp,
					    int full)
{
	return pmdp_huge_get_and_clear(mm, address, pmdp);
}
#endif

#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address, pud_t *pudp,
					    int full)
{
	return pudp_huge_get_and_clear(mm, address, pudp);
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address, pte_t *ptep,
					    int full)
{
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	return pte;
}
#endif

/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTEs which are already
 * not present, or in the process of an address space destruction.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
static inline void pte_clear_not_present_full(struct mm_struct *mm,
					      unsigned long address,
					      pte_t *ptep,
					      int full)
{
	pte_clear(mm, address, ptep);
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
				   unsigned long address,
				   pmd_t *pmdp);
extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
				   unsigned long address,
				   pud_t *pudp);
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif
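
/*
 * Illustrative sketch (not part of this header): fork-time copy-on-write
 * setup write-protects shared anonymous ptes in roughly this way (see
 * copy_one_pte() in mm/memory.c for the real thing):
 *
 *	if (is_cow_mapping(vm_flags) && pte_write(pte)) {
 *		ptep_set_wrprotect(src_mm, addr, src_pte);
 *		pte = pte_wrprotect(pte);	// child copy is read-only too
 *	}
 */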

/*
 * Fallbacks for the NUMA-balancing "saved write" helpers: on architectures
 * without a dedicated software bit they collapse onto the ordinary write
 * permission helpers.
 */
#ifndef pte_savedwrite
#define pte_savedwrite pte_write
#endif

#ifndef pte_mk_savedwrite
#define pte_mk_savedwrite pte_mkwrite
#endif

#ifndef pte_clear_savedwrite
#define pte_clear_savedwrite pte_wrprotect
#endif

#ifndef pmd_savedwrite
#define pmd_savedwrite pmd_write
#endif

#ifndef pmd_mk_savedwrite
#define pmd_mk_savedwrite pmd_mkwrite
#endif

#ifndef pmd_clear_savedwrite
#define pmd_clear_savedwrite pmd_wrprotect
#endif

#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t old_pmd = *pmdp;
	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
}
#else
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	BUILD_BUG();
}
#endif
#endif
#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline void pudp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pud_t *pudp)
{
	pud_t old_pud = *pudp;

	set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
}
#else
static inline void pudp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pud_t *pudp)
{
	BUILD_BUG();
}
#endif
#endif

#ifndef pmdp_collapse_flush
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#else
static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	BUILD_BUG();
	return *pmdp;
}
#define pmdp_collapse_flush pmdp_collapse_flush
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is an implementation of pmdp_establish() that is only suitable
 * for an architecture that doesn't have hardware dirty/accessed bits.
 * In that case we can't race with the CPU setting those bits, so the
 * non-atomic read-then-write below is fine.
 */
static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old_pmd = *pmdp;
	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
	return old_pmd;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			     pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}
#endif

#ifndef __HAVE_ARCH_PTE_UNUSED
/*
 * Some architectures provide facilities to virtualization guests
 * so that they can flag allocated pages as unused. This allows the
 * host to transparently reclaim unused pages. This function returns
 * whether the pte's page is unused.
 */
static inline int pte_unused(pte_t pte)
{
	return 0;
}
#endif

#ifndef pte_access_permitted
#define pte_access_permitted(pte, write) \
	(pte_present(pte) && (!(write) || pte_write(pte)))
#endif

#ifndef pmd_access_permitted
#define pmd_access_permitted(pmd, write) \
	(pmd_present(pmd) && (!(write) || pmd_write(pmd)))
#endif

#ifndef pud_access_permitted
#define pud_access_permitted(pud, write) \
	(pud_present(pud) && (!(write) || pud_write(pud)))
#endif

#ifndef p4d_access_permitted
#define p4d_access_permitted(p4d, write) \
	(p4d_present(p4d) && (!(write) || p4d_write(p4d)))
#endif

#ifndef pgd_access_permitted
#define pgd_access_permitted(pgd, write) \
	(pgd_present(pgd) && (!(write) || pgd_write(pgd)))
#endif

#ifndef __HAVE_ARCH_PMD_SAME
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return pmd_val(pmd_a) == pmd_val(pmd_b);
}

static inline int pud_same(pud_t pud_a, pud_t pud_b)
{
	return pud_val(pud_a) == pud_val(pud_b);
}
#endif

#ifndef __HAVE_ARCH_P4D_SAME
static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
{
	return p4d_val(p4d_a) == p4d_val(p4d_b);
}
#endif

#ifndef __HAVE_ARCH_PGD_SAME
static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
{
	return pgd_val(pgd_a) == pgd_val(pgd_b);
}
#endif

/*
 * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
 * TLB flush will be required as a result of the "set". For example, use
 * in scenarios where it is known ahead of time that the routine is
 * setting non-present entries, or re-setting an existing entry to the
 * same value. Otherwise, use the typical "set" helpers and flush the
 * TLB.
 */
#define set_pte_safe(ptep, pte) \
({ \
	WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \
	set_pte(ptep, pte); \
})

#define set_pmd_safe(pmdp, pmd) \
({ \
	WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \
	set_pmd(pmdp, pmd); \
})

#define set_pud_safe(pudp, pud) \
({ \
	WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
	set_pud(pudp, pud); \
})

#define set_p4d_safe(p4dp, p4d) \
({ \
	WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
	set_p4d(p4dp, p4d); \
})

#define set_pgd_safe(pgdp, pgd) \
({ \
	WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
	set_pgd(pgdp, pgd); \
})
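
/*
 * Illustrative sketch (not part of this header): early boot code that
 * populates brand-new, never-present entries can use the _safe variants
 * and skip the TLB flush, e.g. when building a direct mapping:
 *
 *	pte = pfn_pte(pfn, PAGE_KERNEL);
 *	set_pte_safe(ptep, pte);	// warns if it would change a live pte
 */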

#ifndef __HAVE_ARCH_DO_SWAP_PAGE
/*
 * Some architectures support metadata associated with a page. When a
 * page is being swapped out, this metadata must be saved so it can be
 * restored when the page is swapped back in. SPARC M7 and newer
 * processors support an ADI (Application Data Integrity) tag for the
 * page as metadata for the page. arch_do_swap_page() can restore this
 * metadata when a page is swapped back in.
 */
static inline void arch_do_swap_page(struct mm_struct *mm,
				     struct vm_area_struct *vma,
				     unsigned long addr,
				     pte_t pte, pte_t oldpte)
{

}
#endif

#ifndef __HAVE_ARCH_UNMAP_ONE
/*
 * Some architectures support metadata associated with a page. When a
 * page is being swapped out, this metadata must be saved so it can be
 * restored when the page is swapped back in. SPARC M7 and newer
 * processors support an ADI (Application Data Integrity) tag for the
 * page as metadata for the page. arch_unmap_one() can save this
 * metadata on a swap-out of a page.
 */
static inline int arch_unmap_one(struct mm_struct *mm,
				 struct vm_area_struct *vma,
				 unsigned long addr,
				 pte_t orig_pte)
{
	return 0;
}
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)	(pte)
#endif

#ifndef pte_accessible
# define pte_accessible(mm, pte)	((void)(pte), 1)
#endif

#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
#endif

#ifndef pgprot_noncached
#define pgprot_noncached(prot)	(prot)
#endif

#ifndef pgprot_writecombine
#define pgprot_writecombine pgprot_noncached
#endif

#ifndef pgprot_writethrough
#define pgprot_writethrough pgprot_noncached
#endif

#ifndef pgprot_device
#define pgprot_device pgprot_noncached
#endif

#ifndef pgprot_modify
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
		newprot = pgprot_noncached(newprot);
	if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
		newprot = pgprot_writecombine(newprot);
	if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
		newprot = pgprot_device(newprot);
	return newprot;
}
#endif
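
/*
 * Illustrative sketch (not part of this header): pgprot_modify() keeps the
 * memory-type bits of an existing mapping while changing its protection,
 * so an mprotect()-style update of an uncached/WC mapping stays uncached/WC
 * (cf. vm_pgprot_modify() in mm/mmap.c):
 *
 *	pgprot_t new = pgprot_modify(oldprot, vm_get_page_prot(new_flags));
 */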

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier.  Although no
 * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
 */

#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

#ifndef p4d_addr_end
#define p4d_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pud_addr_end
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif
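
/*
 * Illustrative sketch (not part of this header): the p?d_addr_end()
 * helpers drive the canonical range-walk loop used throughout mm/
 * (see e.g. mm/pagewalk.c):
 *
 *	static void walk_pmd_range(pud_t *pud, unsigned long addr,
 *				   unsigned long end)
 *	{
 *		pmd_t *pmd = pmd_offset(pud, addr);
 *		unsigned long next;
 *
 *		do {
 *			next = pmd_addr_end(addr, end);
 *			if (pmd_none_or_clear_bad(pmd))
 *				continue;
 *			// ... visit the pte level below this pmd ...
 *		} while (pmd++, addr = next, addr != end);
 *	}
 */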

/*
 * When walking page tables, we usually want to skip any p?d_none entries;
 * and any p?d_bad entries - reporting the error before resetting to none.
 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);

#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *);
#else
#define p4d_clear_bad(p4d)	do { } while (0)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *);
#else
#define pud_clear_bad(pud)	do { } while (0)
#endif

void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int p4d_none_or_clear_bad(p4d_t *p4d)
{
	if (p4d_none(*p4d))
		return 1;
	if (unlikely(p4d_bad(*p4d))) {
		p4d_clear_bad(p4d);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}

static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
					     unsigned long addr,
					     pte_t *ptep)
{
	/*
	 * Get the current pte state, but zero it out to make it
	 * non-present, preventing the hardware from asynchronously
	 * updating it.
	 */
	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
}

static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
					     unsigned long addr,
					     pte_t *ptep, pte_t pte)
{
	/*
	 * The pte is non-present, so there's no hardware state to
	 * preserve.
	 */
	set_pte_at(vma->vm_mm, addr, ptep, pte);
}

#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
/*
 * Start a pte protection read-modify-write transaction, which
 * protects against asynchronous hardware modifications to the pte.
 * The intention is not to prevent the hardware from making pte
 * updates, but to prevent any updates it may make from being lost.
 *
 * This does not protect against other software modifications of the
 * pte; the appropriate pte lock must be held over the transaction.
 *
 * Note that this interface is intended to be batchable, meaning that
 * ptep_modify_prot_commit may not actually update the pte, but merely
 * queue the update to be done at some later time.  The update must be
 * actually committed before the pte lock is released, however.
 */
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
					   unsigned long addr,
					   pte_t *ptep)
{
	return __ptep_modify_prot_start(vma, addr, ptep);
}

/*
 * Commit an update to a pte, leaving any hardware-controlled bits in
 * the PTE unmodified.
 */
static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
					   unsigned long addr,
					   pte_t *ptep, pte_t old_pte, pte_t pte)
{
	__ptep_modify_prot_commit(vma, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
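
/*
 * Illustrative sketch (not part of this header): mprotect-style code in
 * mm/mprotect.c brackets its pte rewrite in this transaction, so hardware
 * dirty/accessed updates that race with the rewrite are not lost:
 *
 *	oldpte = ptep_modify_prot_start(vma, addr, pte);
 *	ptent = pte_modify(oldpte, newprot);
 *	ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
 */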
#endif /* CONFIG_MMU */

/*
 * No-op macros that just return the current protection value. Defined here
 * because these macros can be used even if CONFIG_MMU is not defined.
 */
#ifndef pgprot_encrypted
#define pgprot_encrypted(prot)	(prot)
#endif

#ifndef pgprot_decrypted
#define pgprot_decrypted(prot)	(prot)
#endif

/*
 * A facility to provide lazy MMU batching.  This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued.  Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window.  Note that using this
 * interface requires that read hazards be removed from the code.  A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date.  This mode can only be entered and left under the protection of
 * the page table locks for all page tables which may be modified.  In the UP
 * case, this is required so that preemption is disabled, and in the SMP case,
 * it must synchronize the delayed page table writes properly on other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#define arch_flush_lazy_mmu_mode()	do {} while (0)
#endif
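
/*
 * Illustrative sketch (not part of this header): batch users bracket a run
 * of pte updates so a paravirt backend can coalesce the hypercalls:
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr != end; pte++, addr += PAGE_SIZE)
 *		set_pte_at(mm, addr, pte, pte_mkold(*pte));	// example update
 *	arch_leave_lazy_mmu_mode();	// flushes any queued updates
 */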

/*
 * A facility to provide batching of the reload of page tables and
 * other process state with the actual context switch code for
 * paravirtualized guests.  By convention, only one of the batched
 * update (lazy) modes (CPU, MMU) should be active at any given time,
 * entry should never be nested, and entry and exits should always be
 * paired.  This is for sanity of maintaining and reasoning about the
 * kernel code.  In this case, the exit (end of the context switch) is
 * in architecture-specific code, and so doesn't need a generic
 * definition.
 */
#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
#define arch_start_context_switch(prev)	do {} while (0)
#endif

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd;
}
#endif
#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
static inline int pte_soft_dirty(pte_t pte)
{
	return 0;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte;
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return 0;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd;
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifndef __HAVE_PFNMAP_TRACKING
/*
 * Interfaces that can be used by architecture code to keep track of
 * the memory type of pfn mappings specified by remap_pfn_range and
 * vmf_insert_pfn.
 */

/*
 * track_pfn_remap is called when a _new_ pfn mapping is being established
 * by remap_pfn_range() for the physical range indicated by pfn and size.
 */
static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
				  unsigned long pfn, unsigned long addr,
				  unsigned long size)
{
	return 0;
}

/*
 * track_pfn_insert is called when a _new_ single pfn is established
 * by vmf_insert_pfn().
 */
static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
				    pfn_t pfn)
{
}

/*
 * track_pfn_copy is called when the vma that is covering the pfnmap gets
 * copied through copy_page_range().
 */
static inline int track_pfn_copy(struct vm_area_struct *vma)
{
	return 0;
}

/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size,
 * or for the entire vma (in which case pfn and size are zero).
 */
static inline void untrack_pfn(struct vm_area_struct *vma,
			       unsigned long pfn, unsigned long size)
{
}

/*
 * untrack_pfn_moved is called while mremapping a pfnmap for a new region.
 */
static inline void untrack_pfn_moved(struct vm_area_struct *vma)
{
}
#else
extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
			   unsigned long pfn, unsigned long addr,
			   unsigned long size);
extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
			     pfn_t pfn);
extern int track_pfn_copy(struct vm_area_struct *vma);
extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
			unsigned long size);
extern void untrack_pfn_moved(struct vm_area_struct *vma);
#endif

#ifdef __HAVE_COLOR_ZERO_PAGE
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))

#else
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	return pfn == zero_pfn;
}

static inline unsigned long my_zero_pfn(unsigned long addr)
{
	extern unsigned long zero_pfn;
	return zero_pfn;
}
#endif
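
/*
 * Illustrative sketch (not part of this header): fault and GUP paths test
 * whether a pte maps the (possibly colored) zero page like this (cf.
 * vm_normal_page() in mm/memory.c):
 *
 *	if (is_zero_pfn(pte_pfn(pte)))
 *		return NULL;	// no struct page to hand out for the zero page
 */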

#ifdef CONFIG_MMU

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return 0;
}
#ifndef pmd_write
static inline int pmd_write(pmd_t pmd)
{
	BUG();
	return 0;
}
#endif /* pmd_write */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifndef pud_write
static inline int pud_write(pud_t pud)
{
	BUG();
	return 0;
}
#endif /* pud_write */

#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline int pmd_devmap(pmd_t pmd)
{
	return 0;
}
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
	(defined(CONFIG_TRANSPARENT_HUGEPAGE) &&		\
	 !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
static inline int pud_trans_huge(pud_t pud)
{
	return 0;
}
#endif

/* See pmd_none_or_trans_huge_or_clear_bad for discussion. */
static inline int pud_none_or_trans_huge_or_dev_or_clear_bad(pud_t *pud)
{
	pud_t pudval = READ_ONCE(*pud);

	if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
		return 1;
	if (unlikely(pud_bad(pudval))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

/* See pmd_trans_unstable for discussion. */
static inline int pud_trans_unstable(pud_t *pud)
{
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
	return pud_none_or_trans_huge_or_dev_or_clear_bad(pud);
#else
	return 0;
#endif
}

#ifndef pmd_read_atomic
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
	/*
	 * Depend on the compiler for an atomic pmd read.  Architectures
	 * where a plain dereference is not atomic enough (e.g. 32-bit
	 * PAE, where the pmd is 64 bits wide) must override this.
	 */
	return *pmdp;
}
#endif

#ifndef arch_needs_pgtable_deposit
#define arch_needs_pgtable_deposit() (false)
#endif

/*
 * This function is meant to be used by sites walking pagetables with
 * the mmap_sem held in read mode to protect against MADV_DONTNEED and
 * transhuge page faults.  MADV_DONTNEED can convert a transhuge pmd
 * into a null pmd and the transhuge page fault can convert a null pmd
 * into a hugepmd or into a regular pmd (if the hugepage allocation
 * fails).  While holding the mmap_sem in read mode the pmd becomes
 * stable and stops changing under us only if it's not null and not a
 * transhuge pmd.  When those races occur and this function makes a
 * difference vs the standard pmd_none_or_clear_bad, the result is
 * undefined, so behaving as if the pmd was none is safe (because it
 * can return none anyway).  The compiler-level barrier() is critically
 * important to compute the two checks atomically on the same pmdval.
 */
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
	pmd_t pmdval = pmd_read_atomic(pmd);
	/*
	 * The barrier will stabilize the pmdval in a register or on
	 * the stack so that it will stop changing under the code.
	 *
	 * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
	 * pmd_read_atomic is allowed to return a not atomic pmdval
	 * (for example pointing to an hugepage that has never been
	 * mapped in the pmd). The below checks will only care about
	 * the low part of the pmd with 32bit PAE x86 anyway, with the
	 * exception of pmd_none(). So the important thing is that if
	 * the low part of the pmd is found null, the high part will
	 * be also null or the pmd_none() check below would be
	 * confused.
	 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	barrier();
#endif
	/*
	 * !pmd_present() checks for pmd migration entries.
	 *
	 * The complete check would use is_pmd_migration_entry() from
	 * linux/swapops.h, but that would require moving this function and
	 * pmd_trans_unstable() into swapops.h to resolve the dependency,
	 * which is too much code movement.
	 *
	 * !pmd_present() is currently equivalent to is_pmd_migration_entry()
	 * because !pmd_present() pages can only be under migration, not
	 * swapped out.
	 *
	 * The redundant pmd_none() check is preserved for future condition
	 * checks on pmd migration entries, and to keep the function's name
	 * honest.
	 */
	if (pmd_none(pmdval) || pmd_trans_huge(pmdval) ||
		(IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval)))
		return 1;
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}

/*
 * This is a noop if Transparent Hugepage Support is not built into
 * the kernel.  Otherwise it is equivalent to
 * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called
 * under the mmap_sem held in read mode: the caller must be prepared
 * for the pmd to change underneath it and must recheck, or take the
 * page table lock, before depending on the value.
 */
static inline int pmd_trans_unstable(pmd_t *pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	return pmd_none_or_trans_huge_or_clear_bad(pmd);
#else
	return 0;
#endif
}
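
/*
 * Illustrative sketch (not part of this header): pte-level walkers that
 * only hold mmap_sem for read guard against a racing THP collapse/fault
 * before touching the pte page (cf. the walkers in mm/madvise.c):
 *
 *	if (pmd_trans_unstable(pmd))
 *		return 0;	// skip; the pmd is (or may become) a huge pmd
 *	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 */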

#ifndef CONFIG_NUMA_BALANCING
/*
 * Technically a PTE can be PROTNONE even when not doing NUMA balancing, but
 * the only case the kernel cares about is NUMA balancing, and PROTNONE is
 * only ever set when the VMA is accessible.  For PROT_NONE VMAs, the PTEs
 * are not marked _PAGE_PROTNONE, so by default implement the helpers as
 * "always no".  It is the responsibility of the caller to distinguish
 * between PROT_NONE protections and NUMA hinting fault protections.
 */
static inline int pte_protnone(pte_t pte)
{
	return 0;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

#endif /* CONFIG_MMU */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP

#ifndef __PAGETABLE_P4D_FOLDED
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
int p4d_clear_huge(p4d_t *p4d);
#else
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline int p4d_clear_huge(p4d_t *p4d)
{
	return 0;
}
#endif /* !__PAGETABLE_P4D_FOLDED */

int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
int pud_clear_huge(pud_t *pud);
int pmd_clear_huge(pmd_t *pmd);
int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
int pud_free_pmd_page(pud_t *pud, unsigned long addr);
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline int p4d_clear_huge(p4d_t *p4d)
{
	return 0;
}
static inline int pud_clear_huge(pud_t *pud)
{
	return 0;
}
static inline int pmd_clear_huge(pmd_t *pmd)
{
	return 0;
}
static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
{
	return 0;
}
static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	return 0;
}
static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	return 0;
}
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * ARCHes with special requirements for evicting THP backing TLB entries can
 * implement this.  Otherwise also, it can help optimize a normal TLB flush
 * in the THP regime: stock flush_tlb_range() typically has an optimization
 * to nuke the entire TLB if the flush span is greater than a threshold,
 * which will likely be true for a single huge page.  Thus a single THP
 * flush will invalidate the entire TLB, which is not desirable.
 * e.g. see arch/arc: flush_pmd_tlb_range
 */
#define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#define flush_pud_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#else
#define flush_pmd_tlb_range(vma, addr, end)	BUILD_BUG()
#define flush_pud_tlb_range(vma, addr, end)	BUILD_BUG()
#endif
#endif

struct file;
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
			unsigned long size, pgprot_t *vma_prot);

#ifndef CONFIG_X86_ESPFIX64
static inline void init_espfix_bsp(void) { }
#endif

extern void __init pgtable_cache_init(void);

#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
{
	return true;
}

static inline bool arch_has_pfn_modify_check(void)
{
	return false;
}
#endif /* !__HAVE_ARCH_PFN_MODIFY_ALLOWED */

/*
 * Architecture PAGE_KERNEL_* fallbacks
 *
 * Some architectures don't define certain PAGE_KERNEL variants because
 * they would be identical to PAGE_KERNEL there; provide generic
 * fallbacks here so common code can use the names unconditionally.
 */
#ifndef PAGE_KERNEL_RO
# define PAGE_KERNEL_RO PAGE_KERNEL
#endif

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

#endif /* !__ASSEMBLY__ */

#ifndef io_remap_pfn_range
#define io_remap_pfn_range remap_pfn_range
#endif

#ifndef has_transparent_hugepage
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define has_transparent_hugepage() 1
#else
#define has_transparent_hugepage() 0
#endif
#endif

/*
 * On some architectures it depends on the mm if the p4d/pud or pmd
 * layer of the page table hierarchy is folded or not.
 */
#ifndef mm_p4d_folded
#define mm_p4d_folded(mm)	__is_defined(__PAGETABLE_P4D_FOLDED)
#endif

#ifndef mm_pud_folded
#define mm_pud_folded(mm)	__is_defined(__PAGETABLE_PUD_FOLDED)
#endif

#ifndef mm_pmd_folded
#define mm_pmd_folded(mm)	__is_defined(__PAGETABLE_PMD_FOLDED)
#endif

/*
 * p?d_leaf() - true if this entry is a final mapping to a physical address.
 * This differs from p?d_huge() in that these helpers are always available
 * (if the architecture supports large pages at the given level) even when
 * CONFIG_HUGETLB_PAGE is not defined.
 */
#ifndef pgd_leaf
#define pgd_leaf(x)	0
#endif
#ifndef p4d_leaf
#define p4d_leaf(x)	0
#endif
#ifndef pud_leaf
#define pud_leaf(x)	0
#endif
#ifndef pmd_leaf
#define pmd_leaf(x)	0
#endif
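
/*
 * Illustrative sketch (not part of this header): a generic walker can stop
 * descending as soon as it hits a leaf at any level, e.g. (pud_pfn() as
 * provided by the architecture):
 *
 *	if (pud_leaf(*pud))
 *		return note_mapping(addr, pud_pfn(*pud), PUD_SIZE);
 *	pmd = pmd_offset(pud, addr);	// otherwise descend to the pmd level
 */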

#endif /* _ASM_GENERIC_PGTABLE_H */