#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#include <linux/pfn.h>

#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU

#include <linux/mm_types.h>
#include <linux/bug.h>
#include <linux/errno.h>

#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
        defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
#endif

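/*
 * Ceiling used when freeing a task's user page tables (see free_pgtables()
 * in mm/memory.c).  The generic default of 0 places no restriction;
 * architectures that keep kernel or vector-page entries in the user page
 * tables override this so the top-level entries covering them survive.
 */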
#ifndef USER_PGTABLES_CEILING
#define USER_PGTABLES_CEILING 0UL
#endif

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pte_t *ptep,
                                 pte_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pmd_t *pmdp,
                                 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pud_t *pudp,
                                 pud_t entry, int dirty);
#else
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
                                        unsigned long address, pmd_t *pmdp,
                                        pmd_t entry, int dirty)
{
        BUILD_BUG();
        return 0;
}
static inline int pudp_set_access_flags(struct vm_area_struct *vma,
                                        unsigned long address, pud_t *pudp,
                                        pud_t entry, int dirty)
{
        BUILD_BUG();
        return 0;
}
#endif
#endif

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long address,
                                            pte_t *ptep)
{
        pte_t pte = *ptep;
        int r = 1;
        if (!pte_young(pte))
                r = 0;
        else
                set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
        return r;
}
#endif

#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long address,
                                            pmd_t *pmdp)
{
        pmd_t pmd = *pmdp;
        int r = 1;
        if (!pmd_young(pmd))
                r = 0;
        else
                set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
        return r;
}
#else
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long address,
                                            pmd_t *pmdp)
{
        BUILD_BUG();
        return 0;
}
#endif
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pmd_t *pmdp);
#else
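/*
 * This stub only exists so that generic callers compile when
 * CONFIG_TRANSPARENT_HUGEPAGE is disabled; it is only legal in code
 * paths the compiler can prove dead, hence the BUILD_BUG().
 */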
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
                                         unsigned long address, pmd_t *pmdp)
{
        BUILD_BUG();
        return 0;
}
#endif
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
                                       unsigned long address,
                                       pte_t *ptep)
{
        pte_t pte = *ptep;
        pte_clear(mm, address, ptep);
        return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
                                            unsigned long address,
                                            pmd_t *pmdp)
{
        pmd_t pmd = *pmdp;
        pmd_clear(pmdp);
        return pmd;
}
#endif
#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
                                            unsigned long address,
                                            pud_t *pudp)
{
        pud_t pud = *pudp;

        pud_clear(pudp);
        return pud;
}
#endif
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
                                                 unsigned long address,
                                                 pmd_t *pmdp, int full)
{
        return pmdp_huge_get_and_clear(mm, address, pmdp);
}
#endif

#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm,
                                                 unsigned long address,
                                                 pud_t *pudp, int full)
{
        return pudp_huge_get_and_clear(mm, address, pudp);
}
#endif
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
                                            unsigned long address, pte_t *ptep,
                                            int full)
{
        pte_t pte;
        pte = ptep_get_and_clear(mm, address, ptep);
        return pte;
}
#endif

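/*
 * Used when tearing down ptes that are already known not to be present:
 * some architectures can skip the usual synchronisation in that case.
 * The generic fallback just clears the pte.
 */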
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
static inline void pte_clear_not_present_full(struct mm_struct *mm,
                                              unsigned long address,
                                              pte_t *ptep,
                                              int full)
{
        pte_clear(mm, address, ptep);
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
                              unsigned long address,
                              pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
                                   unsigned long address,
                                   pmd_t *pmdp);
extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
                                   unsigned long address,
                                   pud_t *pudp);
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
        pte_t old_pte = *ptep;
        set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif

#ifndef pte_savedwrite
#define pte_savedwrite pte_write
#endif

#ifndef pte_mk_savedwrite
#define pte_mk_savedwrite pte_mkwrite
#endif

#ifndef pte_clear_savedwrite
#define pte_clear_savedwrite pte_wrprotect
#endif

#ifndef pmd_savedwrite
#define pmd_savedwrite pmd_write
#endif

#ifndef pmd_mk_savedwrite
#define pmd_mk_savedwrite pmd_mkwrite
#endif

#ifndef pmd_clear_savedwrite
#define pmd_clear_savedwrite pmd_wrprotect
#endif

#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
                                      unsigned long address, pmd_t *pmdp)
{
        pmd_t old_pmd = *pmdp;
        set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
}
#else
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
                                      unsigned long address, pmd_t *pmdp)
{
        BUILD_BUG();
}
#endif
#endif
#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline void pudp_set_wrprotect(struct mm_struct *mm,
                                      unsigned long address, pud_t *pudp)
{
        pud_t old_pud = *pudp;

        set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
}
#else
static inline void pudp_set_wrprotect(struct mm_struct *mm,
                                      unsigned long address, pud_t *pudp)
{
        BUILD_BUG();
}
#endif
#endif

#ifndef pmdp_collapse_flush
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
                                 unsigned long address, pmd_t *pmdp);
#else
static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
                                        unsigned long address,
                                        pmd_t *pmdp)
{
        BUILD_BUG();
        return *pmdp;
}
#define pmdp_collapse_flush pmdp_collapse_flush
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                       pgtable_t pgtable);
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                            pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
static inline void pmdp_huge_split_prepare(struct vm_area_struct *vma,
                                           unsigned long address, pmd_t *pmdp)
{

}
#endif

#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
        return pte_val(pte_a) == pte_val(pte_b);
}
#endif

#ifndef __HAVE_ARCH_PTE_UNUSED
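/*
 * Some architectures (s390, for example) let virtualization guests mark
 * allocated-but-unused pages so the host can reclaim them.  pte_unused()
 * reports whether the page mapped by the pte has been marked unused; the
 * generic fallback says "no" for everything.
 */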
static inline int pte_unused(pte_t pte)
{
        return 0;
}
#endif

#ifndef pte_access_permitted
#define pte_access_permitted(pte, write) \
        (pte_present(pte) && (!(write) || pte_write(pte)))
#endif

#ifndef pmd_access_permitted
#define pmd_access_permitted(pmd, write) \
        (pmd_present(pmd) && (!(write) || pmd_write(pmd)))
#endif

#ifndef pud_access_permitted
#define pud_access_permitted(pud, write) \
        (pud_present(pud) && (!(write) || pud_write(pud)))
#endif

#ifndef p4d_access_permitted
#define p4d_access_permitted(p4d, write) \
        (p4d_present(p4d) && (!(write) || p4d_write(p4d)))
#endif

#ifndef pgd_access_permitted
#define pgd_access_permitted(pgd, write) \
        (pgd_present(pgd) && (!(write) || pgd_write(pgd)))
#endif

#ifndef __HAVE_ARCH_PMD_SAME
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
        return pmd_val(pmd_a) == pmd_val(pmd_b);
}

static inline int pud_same(pud_t pud_a, pud_t pud_b)
{
        return pud_val(pud_a) == pud_val(pud_b);
}
#else
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
        BUILD_BUG();
        return 0;
}

static inline int pud_same(pud_t pud_a, pud_t pud_b)
{
        BUILD_BUG();
        return 0;
}
#endif
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr) (pte)
#endif

#ifndef pte_accessible
# define pte_accessible(mm, pte) ((void)(pte), 1)
#endif

#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
#endif

#ifndef pgprot_noncached
#define pgprot_noncached(prot) (prot)
#endif

#ifndef pgprot_writecombine
#define pgprot_writecombine pgprot_noncached
#endif

#ifndef pgprot_writethrough
#define pgprot_writethrough pgprot_noncached
#endif

#ifndef pgprot_device
#define pgprot_device pgprot_noncached
#endif

#ifndef pgprot_modify
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
        if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
                newprot = pgprot_noncached(newprot);
        if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
                newprot = pgprot_writecombine(newprot);
        if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
                newprot = pgprot_device(newprot);
        return newprot;
}
#endif

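/*
 * When walking page tables, return the address of the next boundary for
 * the current level, or @end if that comes first.  The rounded-up
 * boundary may wrap to 0, which is why the comparisons below are done on
 * "boundary - 1" and "end - 1".
 */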
#define pgd_addr_end(addr, end) \
({      unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
        (__boundary - 1 < (end) - 1) ? __boundary : (end); \
})

#ifndef p4d_addr_end
#define p4d_addr_end(addr, end) \
({      unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK; \
        (__boundary - 1 < (end) - 1) ? __boundary : (end); \
})
#endif

#ifndef pud_addr_end
#define pud_addr_end(addr, end) \
({      unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \
        (__boundary - 1 < (end) - 1) ? __boundary : (end); \
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end) \
({      unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
        (__boundary - 1 < (end) - 1) ? __boundary : (end); \
})
#endif

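/*
 * Page-table walkers normally want to skip p?d_none() entries and to
 * report and reset p?d_bad() entries.  The tests are done inline below;
 * the reporting and clearing of bad entries lives in mm/memory.c.
 */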
void pgd_clear_bad(pgd_t *);
void p4d_clear_bad(p4d_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
        if (pgd_none(*pgd))
                return 1;
        if (unlikely(pgd_bad(*pgd))) {
                pgd_clear_bad(pgd);
                return 1;
        }
        return 0;
}

static inline int p4d_none_or_clear_bad(p4d_t *p4d)
{
        if (p4d_none(*p4d))
                return 1;
        if (unlikely(p4d_bad(*p4d))) {
                p4d_clear_bad(p4d);
                return 1;
        }
        return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
        if (pud_none(*pud))
                return 1;
        if (unlikely(pud_bad(*pud))) {
                pud_clear_bad(pud);
                return 1;
        }
        return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
        if (pmd_none(*pmd))
                return 1;
        if (unlikely(pmd_bad(*pmd))) {
                pmd_clear_bad(pmd);
                return 1;
        }
        return 0;
}

static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
                                             unsigned long addr,
                                             pte_t *ptep)
{
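        /*
         * Read the current pte value and clear the entry, so the hardware
         * cannot asynchronously set the accessed/dirty bits while the
         * protection update is in progress.
         */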
        return ptep_get_and_clear(mm, addr, ptep);
}

static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
                                             unsigned long addr,
                                             pte_t *ptep, pte_t pte)
{
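        /*
         * The entry was cleared by __ptep_modify_prot_start(), so there is
         * no hardware-maintained state to preserve; just install the new pte.
         */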
        set_pte_at(mm, addr, ptep, pte);
}

#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
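/*
 * Start a pte protection read-modify-write transaction.  This protects
 * against hardware updates of the pte (accessed/dirty bits) getting lost
 * while the protection bits are being changed; it does not protect against
 * concurrent software updates, so the pte lock must still be held.
 * ptep_modify_prot_commit() completes the transaction.  Architectures that
 * can batch or defer the update define
 * __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION and supply their own versions.
 */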
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
                                           unsigned long addr,
                                           pte_t *ptep)
{
        return __ptep_modify_prot_start(mm, addr, ptep);
}

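/*
 * Commit an update to a pte started by ptep_modify_prot_start(), leaving
 * any hardware-controlled bits in the pte unmodified.
 */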
static inline void ptep_modify_prot_commit(struct mm_struct *mm,
                                           unsigned long addr,
                                           pte_t *ptep, pte_t pte)
{
        __ptep_modify_prot_commit(mm, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
#endif /* CONFIG_MMU */

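/*
 * Default, no-op versions of the memory-encryption pgprot helpers.  They
 * sit outside the CONFIG_MMU block above so they are also available to
 * !MMU configurations.
 */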
#ifndef pgprot_encrypted
#define pgprot_encrypted(prot) (prot)
#endif

#ifndef pgprot_decrypted
#define pgprot_decrypted(prot) (prot)
#endif

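/*
 * Lazy MMU mode lets an architecture (or paravirtualized guest) batch
 * page-table updates issued between arch_enter_lazy_mmu_mode() and
 * arch_leave_lazy_mmu_mode() and apply them in one go instead of paying
 * for each update separately; arch_flush_lazy_mmu_mode() forces any
 * pending batch out early.  The default hooks below do nothing.
 */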
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode() do {} while (0)
#define arch_leave_lazy_mmu_mode() do {} while (0)
#define arch_flush_lazy_mmu_mode() do {} while (0)
#endif

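/*
 * arch_start_context_switch() is a hook invoked at the start of a context
 * switch so that architectures (notably paravirtualized ones) can begin
 * batching the state updates the switch will perform.  The default is a
 * no-op.
 */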
#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
#define arch_start_context_switch(prev) do {} while (0)
#endif

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
        return pmd;
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
        return 0;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
        return pmd;
}
#endif
#else
static inline int pte_soft_dirty(pte_t pte)
{
        return 0;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
        return 0;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
        return pte;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
        return pmd;
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
        return pte;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
        return pmd;
}

static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
        return pte;
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
        return 0;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
        return pte;
}

static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
        return pmd;
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
        return 0;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
        return pmd;
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifndef __HAVE_PFNMAP_TRACKING
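/*
 * Interfaces used by the core VM to let architecture code track the memory
 * type of raw-pfn mappings (remap_pfn_range() and friends).  The stubs
 * below are used when the architecture does not define
 * __HAVE_PFNMAP_TRACKING.
 *
 * track_pfn_remap() is called when a new pfn mapping covering @pfn/@size
 * at @addr is being established in @vma.
 */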
static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
                                  unsigned long pfn, unsigned long addr,
                                  unsigned long size)
{
        return 0;
}

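/*
 * track_pfn_insert() is called when a new single-pfn mapping is being
 * established via a vm_insert_pfn()-style interface.
 */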
static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
                                    pfn_t pfn)
{
}

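/*
 * track_pfn_copy() is called when a pfn mapping is duplicated, e.g. when
 * the vma is copied at fork() time.
 */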
static inline int track_pfn_copy(struct vm_area_struct *vma)
{
        return 0;
}

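/*
 * untrack_pfn() is called while unmapping a pfn mapping, so the
 * architecture can drop any reservation it made for the range.
 */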
static inline void untrack_pfn(struct vm_area_struct *vma,
                               unsigned long pfn, unsigned long size)
{
}

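/*
 * untrack_pfn_moved() is called when a pfn mapping has been moved to a
 * new range by mremap().
 */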
static inline void untrack_pfn_moved(struct vm_area_struct *vma)
{
}
#else
extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
                           unsigned long pfn, unsigned long addr,
                           unsigned long size);
extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
                             pfn_t pfn);
extern int track_pfn_copy(struct vm_area_struct *vma);
extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
                        unsigned long size);
extern void untrack_pfn_moved(struct vm_area_struct *vma);
#endif

#ifdef __HAVE_COLOR_ZERO_PAGE
static inline int is_zero_pfn(unsigned long pfn)
{
        extern unsigned long zero_pfn;
        unsigned long offset_from_zero_pfn = pfn - zero_pfn;
        return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))

#else
static inline int is_zero_pfn(unsigned long pfn)
{
        extern unsigned long zero_pfn;
        return pfn == zero_pfn;
}

static inline unsigned long my_zero_pfn(unsigned long addr)
{
        extern unsigned long zero_pfn;
        return zero_pfn;
}
#endif

#ifdef CONFIG_MMU

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
        return 0;
}
#ifndef __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
        BUG();
        return 0;
}
#endif
#endif

#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
        (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
         !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
static inline int pud_trans_huge(pud_t pud)
{
        return 0;
}
#endif

#ifndef pmd_read_atomic
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
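        /*
         * Depend on the compiler not rereading *pmdp: a single aligned
         * word-sized load is atomic enough here.  Architectures where a
         * huge pmd cannot be read atomically (32-bit PAE, for example)
         * provide their own pmd_read_atomic().
         */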
        return *pmdp;
}
#endif

#ifndef arch_needs_pgtable_deposit
#define arch_needs_pgtable_deposit() (false)
#endif

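/*
 * Helper for page-table walkers that hold mmap_sem only in read mode.
 * Under that lock a pmd can still change: MADV_DONTNEED can turn a
 * transhuge pmd into a none pmd, and a transparent-hugepage fault can turn
 * a none pmd into a transhuge (or regular) pmd.  The pmd is only stable if
 * it is neither none nor transhuge, so this function returns 1 ("unstable,
 * skip or retry") for none, transhuge and (with THP migration) non-present
 * pmds, and otherwise reports and clears bad pmds just like
 * pmd_none_or_clear_bad().
 */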
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
        pmd_t pmdval = pmd_read_atomic(pmd);

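        /*
         * The barrier below stabilizes the value read into pmdval so that
         * all of the checks that follow operate on the same snapshot, even
         * if the pmd keeps changing underneath us.
         */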
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        barrier();
#endif

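        /*
         * A none, transhuge or (when THP migration is enabled) non-present
         * pmd tells the caller to skip or retry: it may be in the middle of
         * being changed by a THP fault or MADV_DONTNEED.  Anything else is
         * either a stable page-table pmd or a bad entry that gets reported
         * and cleared.
         */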
        if (pmd_none(pmdval) || pmd_trans_huge(pmdval) ||
            (IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval)))
                return 1;
        if (unlikely(pmd_bad(pmdval))) {
                pmd_clear_bad(pmd);
                return 1;
        }
        return 0;
}

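/*
 * pmd_trans_unstable() reports whether the pmd may still change under a
 * walker holding mmap_sem in read mode.  With THP disabled a pmd that
 * points to a page table cannot change, so this is a no-op returning 0;
 * with THP enabled it is equivalent to
 * pmd_none_or_trans_huge_or_clear_bad().
 */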
static inline int pmd_trans_unstable(pmd_t *pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        return pmd_none_or_trans_huge_or_clear_bad(pmd);
#else
        return 0;
#endif
}

#ifndef CONFIG_NUMA_BALANCING
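/*
 * A pte/pmd can technically be PROT_NONE even without NUMA balancing, but
 * the kernel only cares about the NUMA-hinting case, which is only set on
 * otherwise-accessible VMAs.  Without CONFIG_NUMA_BALANCING the helpers
 * therefore simply report "not protnone".
 */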
static inline int pte_protnone(pte_t pte)
{
        return 0;
}

static inline int pmd_protnone(pmd_t pmd)
{
        return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

#endif /* CONFIG_MMU */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP

#ifndef __PAGETABLE_P4D_FOLDED
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
int p4d_clear_huge(p4d_t *p4d);
#else
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
        return 0;
}
static inline int p4d_clear_huge(p4d_t *p4d)
{
        return 0;
}
#endif /* !__PAGETABLE_P4D_FOLDED */

int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
int pud_clear_huge(pud_t *pud);
int pmd_clear_huge(pmd_t *pmd);
#else
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
        return 0;
}
static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
        return 0;
}
static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
        return 0;
}
static inline int p4d_clear_huge(p4d_t *p4d)
{
        return 0;
}
static inline int pud_clear_huge(pud_t *pud)
{
        return 0;
}
static inline int pmd_clear_huge(pmd_t *pmd)
{
        return 0;
}
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
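/*
 * Architectures with special requirements for flushing the TLB entries
 * backing a huge pmd/pud mapping can override these.  The generic fallback
 * reuses flush_tlb_range(), which is always correct but gives the
 * architecture no chance to optimise the flush of a single huge entry.
 */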
#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
#define flush_pud_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
#else
#define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG()
#define flush_pud_tlb_range(vma, addr, end) BUILD_BUG()
#endif
#endif

struct file;
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                                 unsigned long size, pgprot_t *vma_prot);
#endif /* !__ASSEMBLY__ */

#ifndef io_remap_pfn_range
#define io_remap_pfn_range remap_pfn_range
#endif

#ifndef has_transparent_hugepage
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define has_transparent_hugepage() 1
#else
#define has_transparent_hugepage() 0
#endif
#endif

#endif /* _ASM_GENERIC_PGTABLE_H */