/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
#include <linux/page-flags.h>
#include <linux/page_ref.h>
#include <linux/memremap.h>
#include <linux/overflow.h>
#include <linux/sizes.h>
#include <linux/sched.h>
#include <linux/pgtable.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;
struct bdi_writeback;
struct pt_regs;

extern int sysctl_page_lock_unfairness;

void init_mm_internals(void);

#ifndef CONFIG_NEED_MULTIPLE_NODES
extern unsigned long max_mapnr;

static inline void set_max_mapnr(unsigned long limit)
{
	max_mapnr = limit;
}
#else
static inline void set_max_mapnr(unsigned long limit) { }
#endif

extern atomic_long_t _totalram_pages;
static inline unsigned long totalram_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalram_pages);
}

static inline void totalram_pages_inc(void)
{
	atomic_long_inc(&_totalram_pages);
}

static inline void totalram_pages_dec(void)
{
	atomic_long_dec(&_totalram_pages);
}

static inline void totalram_pages_add(long count)
{
	atomic_long_add(count, &_totalram_pages);
}

extern void * high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
extern const int mmap_rnd_bits_min;
extern const int mmap_rnd_bits_max;
extern int mmap_rnd_bits __read_mostly;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
extern const int mmap_rnd_compat_bits_min;
extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __read_mostly;
#endif

#include <asm/page.h>
#include <asm/processor.h>

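/*
 * Architectures that support memory tagging (assigning tags to memory
 * regions, embedding the tags into addresses that point at them, and
 * checking that memory and pointer tags match on access) redefine
 * untagged_addr() to strip tags from pointers; it is a no-op everywhere
 * else.
 */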
#ifndef untagged_addr
#define untagged_addr(addr) (addr)
#endif

#ifndef __pa_symbol
#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

#ifndef page_to_virt
#define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
#endif

#ifndef lm_alias
#define lm_alias(x)	__va(__pa_symbol(x))
#endif

#ifndef mm_forbids_zeropage
#define mm_forbids_zeropage(X)	(0)
#endif

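/*
 * On 64-bit, struct page is a multiple of 8 bytes between 56 and 80 bytes,
 * so it can be cleared with a short unrolled sequence of stores instead of
 * a memset() call; the BUILD_BUG_ON()s keep the switch below in sync with
 * the real size of struct page.
 */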
#if BITS_PER_LONG == 64
#define mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
static inline void __mm_zero_struct_page(struct page *page)
{
	unsigned long *_pp = (void *)page;

	BUILD_BUG_ON(sizeof(struct page) & 7);
	BUILD_BUG_ON(sizeof(struct page) < 56);
	BUILD_BUG_ON(sizeof(struct page) > 80);

	switch (sizeof(struct page)) {
	case 80:
		_pp[9] = 0;
		fallthrough;
	case 72:
		_pp[8] = 0;
		fallthrough;
	case 64:
		_pp[7] = 0;
		fallthrough;
	case 56:
		_pp[6] = 0;
		_pp[5] = 0;
		_pp[4] = 0;
		_pp[3] = 0;
		_pp[2] = 0;
		_pp[1] = 0;
		_pp[0] = 0;
	}
}
#else
#define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
#endif

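/*
 * Default maximum number of active map areas; this limits the number of
 * VMAs per mm_struct.  An ELF core dump emits one section per VMA and
 * stores the section count in an unsigned short, so a small margin below
 * USHRT_MAX is kept free for the extra ELF core-dump sections.
 */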
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

extern int sysctl_max_map_count;

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;

int overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)

#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))

struct vm_area_struct *vm_area_alloc(struct mm_struct *);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
void vm_area_free(struct vm_area_struct *);

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

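/*
 * vm_flags in vm_area_struct, see mm_types.h.
 * When changing, update also include/trace/events/mmflags.h.
 */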
#define VM_NONE		0x00000000

#define VM_READ		0x00000001
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

#define VM_MAYREAD	0x00000010
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100
#define VM_UFFD_MISSING	0x00000200
#define VM_PFNMAP	0x00000400
#define VM_DENYWRITE	0x00000800
#define VM_UFFD_WP	0x00001000

#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000

#define VM_SEQ_READ	0x00008000
#define VM_RAND_READ	0x00010000

#define VM_DONTCOPY	0x00020000
#define VM_DONTEXPAND	0x00040000
#define VM_LOCKONFAULT	0x00080000
#define VM_ACCOUNT	0x00100000
#define VM_NORESERVE	0x00200000
#define VM_HUGETLB	0x00400000
#define VM_SYNC		0x00800000
#define VM_ARCH_1	0x01000000
#define VM_WIPEONFORK	0x02000000
#define VM_DONTDUMP	0x04000000

#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY	0x08000000
#else
# define VM_SOFTDIRTY	0
#endif

#define VM_MIXEDMAP	0x10000000
#define VM_HUGEPAGE	0x20000000
#define VM_NOHUGEPAGE	0x40000000
#define VM_MERGEABLE	0x80000000

#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
#define VM_HIGH_ARCH_BIT_0	32
#define VM_HIGH_ARCH_BIT_1	33
#define VM_HIGH_ARCH_BIT_2	34
#define VM_HIGH_ARCH_BIT_3	35
#define VM_HIGH_ARCH_BIT_4	36
#define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
#define VM_HIGH_ARCH_4	BIT(VM_HIGH_ARCH_BIT_4)
#endif

#ifdef CONFIG_ARCH_HAS_PKEYS
# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0	VM_HIGH_ARCH_0
# define VM_PKEY_BIT1	VM_HIGH_ARCH_1
# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
#ifdef CONFIG_PPC
# define VM_PKEY_BIT4	VM_HIGH_ARCH_4
#else
# define VM_PKEY_BIT4	0
#endif
#endif

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_SPARC64)
# define VM_SPARC_ADI	VM_ARCH_1
# define VM_ARCH_CLEAR	VM_SPARC_ADI
#elif defined(CONFIG_ARM64)
# define VM_ARM64_BTI	VM_ARCH_1
# define VM_ARCH_CLEAR	VM_ARM64_BTI
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1
#endif

#if defined(CONFIG_ARM64_MTE)
# define VM_MTE		VM_HIGH_ARCH_0
# define VM_MTE_ALLOWED	VM_HIGH_ARCH_1
#else
# define VM_MTE		VM_NONE
# define VM_MTE_ALLOWED	VM_NONE
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)

#define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_FLAGS_NON_EXEC	(VM_READ | VM_WRITE | VM_MAYREAD | \
				 VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_FLAGS_EXEC	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#ifndef VM_DATA_DEFAULT_FLAGS
#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_EXEC
#endif

#ifndef VM_STACK_DEFAULT_FLAGS
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK	VM_GROWSUP
#else
#define VM_STACK	VM_GROWSDOWN
#endif

#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)

#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)

#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)

#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE

#define VM_LOCKED_CLEAR_MASK	(~(VM_LOCKED | VM_LOCKONFAULT))

#ifndef VM_ARCH_CLEAR
# define VM_ARCH_CLEAR	VM_NONE
#endif
#define VM_FLAGS_CLEAR	(ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)

extern pgprot_t protection_map[16];

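/*
 * Fault flag definitions.  FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
 * together encode the retry state of a fault: ALLOW_RETRY means the handler
 * may drop the mmap lock and retry the fault, TRIED means at least one
 * retry has already happened.  FAULT_FLAG_DEFAULT below is the sane
 * starting set for most page-fault callers.
 */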
#define FAULT_FLAG_WRITE			0x01
#define FAULT_FLAG_MKWRITE			0x02
#define FAULT_FLAG_ALLOW_RETRY			0x04
#define FAULT_FLAG_RETRY_NOWAIT			0x08
#define FAULT_FLAG_KILLABLE			0x10
#define FAULT_FLAG_TRIED			0x20
#define FAULT_FLAG_USER				0x40
#define FAULT_FLAG_REMOTE			0x80
#define FAULT_FLAG_INSTRUCTION			0x100
#define FAULT_FLAG_INTERRUPTIBLE		0x200

#define FAULT_FLAG_DEFAULT  (FAULT_FLAG_ALLOW_RETRY | \
			     FAULT_FLAG_KILLABLE | \
			     FAULT_FLAG_INTERRUPTIBLE)

static inline bool fault_flag_allow_retry_first(unsigned int flags)
{
	return (flags & FAULT_FLAG_ALLOW_RETRY) &&
	    (!(flags & FAULT_FLAG_TRIED));
}

#define FAULT_FLAG_TRACE \
	{ FAULT_FLAG_WRITE,		"WRITE" }, \
	{ FAULT_FLAG_MKWRITE,		"MKWRITE" }, \
	{ FAULT_FLAG_ALLOW_RETRY,	"ALLOW_RETRY" }, \
	{ FAULT_FLAG_RETRY_NOWAIT,	"RETRY_NOWAIT" }, \
	{ FAULT_FLAG_KILLABLE,		"KILLABLE" }, \
	{ FAULT_FLAG_TRIED,		"TRIED" }, \
	{ FAULT_FLAG_USER,		"USER" }, \
	{ FAULT_FLAG_REMOTE,		"REMOTE" }, \
	{ FAULT_FLAG_INSTRUCTION,	"INSTRUCTION" }, \
	{ FAULT_FLAG_INTERRUPTIBLE,	"INTERRUPTIBLE" }

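/*
 * struct vm_fault carries the state of a page fault down the fault-handling
 * paths: @vma, @flags, @gfp_mask, @pgoff and @address describe the fault
 * itself; @pmd, @pud and @orig_pte record the page-table entries involved.
 * ->fault handlers return the faulted page in @page (unless VM_FAULT_NOPAGE
 * is set); @cow_page is a page the handler may use for a COW fault.
 * @pte and @ptl are valid only while the page-table lock is held, and
 * @prealloc_pte is a pre-allocated page table for ->map_pages(), which runs
 * in atomic context and must not allocate.
 */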
struct vm_fault {
	struct vm_area_struct *vma;
	unsigned int flags;
	gfp_t gfp_mask;
	pgoff_t pgoff;
	unsigned long address;
	pmd_t *pmd;
	pud_t *pud;
	pte_t orig_pte;

	struct page *cow_page;
	struct page *page;

	pte_t *pte;
	spinlock_t *ptl;
	pgtable_t prealloc_pte;
};

enum page_entry_size {
	PE_SIZE_PTE = 0,
	PE_SIZE_PMD,
	PE_SIZE_PUD,
};

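/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */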
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*split)(struct vm_area_struct * area, unsigned long addr);
	int (*mremap)(struct vm_area_struct * area);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf,
			enum page_entry_size pe_size);
	void (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct * area);

	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
#endif

	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};

static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	static const struct vm_operations_struct dummy_vm_ops = {};

	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	vma->vm_ops = &dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
}

static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
	vma->vm_ops = NULL;
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
{
	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

	if (!maybe_stack)
		return false;

	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
						VM_STACK_INCOMPLETE_SETUP)
		return true;

	return false;
}

static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
	if (!current->mm)
		return true;

	if (current->mm != vma->vm_mm)
		return true;

	return false;
}

static inline bool vma_is_accessible(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_ACCESS_FLAGS;
}

#ifdef CONFIG_SHMEM
bool vma_is_shmem(struct vm_area_struct *vma);
#else
static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
#endif

int vma_is_stack_for_current(struct vm_area_struct *vma);

#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }

struct mmu_gather;
struct inode;

#include <linux/huge_mm.h>

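/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */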
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
	return page_ref_dec_and_test(page);
}

static inline int get_page_unless_zero(struct page *page)
{
	return page_ref_add_unless(page, 1, 0);
}

extern int page_is_ram(unsigned long pfn);

enum {
	REGION_INTERSECTS,
	REGION_DISJOINT,
	REGION_MIXED,
};

int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
		      unsigned long desc);

struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

#ifndef is_ioremap_addr
#define is_ioremap_addr(x) is_vmalloc_addr(x)
#endif

#ifdef CONFIG_MMU
extern bool is_vmalloc_addr(const void *x);
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline bool is_vmalloc_addr(const void *x)
{
	return false;
}
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
static inline void *kvmalloc(size_t size, gfp_t flags)
{
	return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
	return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
static inline void *kvzalloc(size_t size, gfp_t flags)
{
	return kvmalloc(size, flags | __GFP_ZERO);
}

static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return kvmalloc(bytes, flags);
}

static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
{
	return kvmalloc_array(n, size, flags | __GFP_ZERO);
}

extern void kvfree(const void *addr);
extern void kvfree_sensitive(const void *addr, size_t len);

static inline int head_compound_mapcount(struct page *head)
{
	return atomic_read(compound_mapcount_ptr(head)) + 1;
}

static inline int compound_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(!PageCompound(page), page);
	page = compound_head(page);
	return head_compound_mapcount(page);
}

static inline void page_mapcount_reset(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

int __page_mapcount(struct page *page);

static inline int page_mapcount(struct page *page)
{
	if (unlikely(PageCompound(page)))
		return __page_mapcount(page);
	return atomic_read(&page->_mapcount) + 1;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int total_mapcount(struct page *page);
int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
#else
static inline int total_mapcount(struct page *page)
{
	return page_mapcount(page);
}
static inline int page_trans_huge_mapcount(struct page *page,
					   int *total_mapcount)
{
	int mapcount = page_mapcount(page);
	if (total_mapcount)
		*total_mapcount = mapcount;
	return mapcount;
}
#endif

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);

	return compound_head(page);
}

void __put_page(struct page *page);

void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);

typedef void compound_page_dtor(struct page *);

enum compound_dtor_id {
	NULL_COMPOUND_DTOR,
	COMPOUND_PAGE_DTOR,
#ifdef CONFIG_HUGETLB_PAGE
	HUGETLB_PAGE_DTOR,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	TRANSHUGE_PAGE_DTOR,
#endif
	NR_COMPOUND_DTORS,
};
extern compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS];

static inline void set_compound_page_dtor(struct page *page,
		enum compound_dtor_id compound_dtor)
{
	VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
	page[1].compound_dtor = compound_dtor;
}

static inline void destroy_compound_page(struct page *page)
{
	VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
	compound_page_dtors[page[1].compound_dtor](page);
}

static inline unsigned int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return page[1].compound_order;
}

static inline bool hpage_pincount_available(struct page *page)
{
	page = compound_head(page);
	return PageCompound(page) && compound_order(page) > 1;
}

static inline int head_compound_pincount(struct page *head)
{
	return atomic_read(compound_pincount_ptr(head));
}

static inline int compound_pincount(struct page *page)
{
	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
	page = compound_head(page);
	return head_compound_pincount(page);
}

static inline void set_compound_order(struct page *page, unsigned int order)
{
	page[1].compound_order = order;
	page[1].compound_nr = 1U << order;
}

static inline unsigned long compound_nr(struct page *page)
{
	if (!PageHead(page))
		return 1;
	return page[1].compound_nr;
}

static inline unsigned long page_size(struct page *page)
{
	return PAGE_SIZE << compound_order(page);
}

static inline unsigned int page_shift(struct page *page)
{
	return PAGE_SHIFT + compound_order(page);
}

void free_compound_page(struct page *page);

#ifdef CONFIG_MMU
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}

vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page);
vm_fault_t finish_fault(struct vm_fault *vmf);
vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#endif

#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)
#define KASAN_TAG_PGOFF		(LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)

#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
#define KASAN_TAG_PGSHIFT	(KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))

#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF) ? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF) ? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
#define KASAN_TAG_MASK		((1UL << KASAN_TAG_WIDTH) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT);
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_zone_device_page(const struct page *page)
{
	return page_zonenum(page) == ZONE_DEVICE;
}
extern void memmap_init_zone_device(struct zone *, unsigned long,
				    unsigned long, struct dev_pagemap *);
#else
static inline bool is_zone_device_page(const struct page *page)
{
	return false;
}
#endif

#ifdef CONFIG_DEV_PAGEMAP_OPS
void free_devmap_managed_page(struct page *page);
DECLARE_STATIC_KEY_FALSE(devmap_managed_key);

static inline bool page_is_devmap_managed(struct page *page)
{
	if (!static_branch_unlikely(&devmap_managed_key))
		return false;
	if (!is_zone_device_page(page))
		return false;
	switch (page->pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
	case MEMORY_DEVICE_FS_DAX:
		return true;
	default:
		break;
	}
	return false;
}

void put_devmap_managed_page(struct page *page);

#else
static inline bool page_is_devmap_managed(struct page *page)
{
	return false;
}

static inline void put_devmap_managed_page(struct page *page)
{
}
#endif

static inline bool is_device_private_page(const struct page *page)
{
	return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
		IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
		is_zone_device_page(page) &&
		page->pgmap->type == MEMORY_DEVICE_PRIVATE;
}

static inline bool is_pci_p2pdma_page(const struct page *page)
{
	return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
		IS_ENABLED(CONFIG_PCI_P2PDMA) &&
		is_zone_device_page(page) &&
		page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
}

#define page_ref_zero_or_close_to_overflow(page) \
	((unsigned int) page_ref_count(page) + 127u <= 127u)

static inline void get_page(struct page *page)
{
	page = compound_head(page);

	VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
	page_ref_inc(page);
}

bool __must_check try_grab_page(struct page *page, unsigned int flags);

static inline __must_check bool try_get_page(struct page *page)
{
	page = compound_head(page);
	if (WARN_ON_ONCE(page_ref_count(page) <= 0))
		return false;
	page_ref_inc(page);
	return true;
}

static inline void put_page(struct page *page)
{
	page = compound_head(page);

	if (page_is_devmap_managed(page)) {
		put_devmap_managed_page(page);
		return;
	}

	if (put_page_testzero(page))
		__put_page(page);
}

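/*
 * GUP_PIN_COUNTING_BIAS is the value added to a page's refcount for each
 * pin_user_pages*() call, instead of the +1 used by get_user_pages*().
 * Overloading the refcount this way lets page_maybe_dma_pinned() below
 * distinguish, with some acceptable false positives, pages pinned for DMA
 * from pages that are merely referenced.  See
 * Documentation/core-api/pin_user_pages.rst for the full story.
 */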
#define GUP_PIN_COUNTING_BIAS (1U << 10)

void unpin_user_page(struct page *page);
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
				 bool make_dirty);
void unpin_user_pages(struct page **pages, unsigned long npages);

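/*
 * page_maybe_dma_pinned() reports whether a page has been pinned via
 * pin_user_pages*().  For huge pages with an exact pincount field the
 * answer is exact; otherwise false positives are possible when a page has
 * accumulated GUP_PIN_COUNTING_BIAS or more ordinary references.
 */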
static inline bool page_maybe_dma_pinned(struct page *page)
{
	if (hpage_pincount_available(page))
		return compound_pincount(page) > 0;

	return ((unsigned int)page_ref_count(compound_head(page))) >=
		GUP_PIN_COUNTING_BIAS;
}

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif

static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	struct page *p = (struct page *)page;

	return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

static inline int cpupid_to_pid(int cpupid)
{
	return cpupid & LAST__PID_MASK;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}

static inline int cpupid_to_nid(int cpupid)
{
	return cpu_to_node(cpupid_to_cpu(cpupid));
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}

static inline bool cpupid_cpu_unset(int cpupid)
{
	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
}

static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
{
	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
}

#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}

static inline int page_cpupid_last(struct page *page)
{
	return page->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int page_cpupid_last(struct page *page)
{
	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}

extern int page_cpupid_xchg_last(struct page *page, int cpupid);

static inline void page_cpupid_reset_last(struct page *page)
{
	page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif
#else
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return page_to_nid(page);
}

static inline int page_cpupid_last(struct page *page)
{
	return page_to_nid(page);
}

static inline int cpupid_to_nid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_pid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return -1;
}

static inline int cpu_pid_to_cpupid(int nid, int pid)
{
	return -1;
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return true;
}

static inline void page_cpupid_reset_last(struct page *page)
{
}

static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
	return false;
}
#endif

#ifdef CONFIG_KASAN_SW_TAGS
static inline u8 page_kasan_tag(const struct page *page)
{
	return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
}

static inline void page_kasan_tag_set(struct page *page, u8 tag)
{
	page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
	page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
}

static inline void page_kasan_tag_reset(struct page *page)
{
	page_kasan_tag_set(page, 0xff);
}
#else
static inline u8 page_kasan_tag(const struct page *page)
{
	return 0xff;
}

static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
static inline void page_kasan_tag_reset(struct page *page) { }
#endif

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

static inline pg_data_t *page_pgdat(const struct page *page)
{
	return NODE_DATA(page_to_nid(page));
}

#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#ifdef SECTION_IN_PAGE_FLAGS
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}

#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return page->mem_cgroup;
}
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return READ_ONCE(page->mem_cgroup);
}
#else
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return NULL;
}
#endif

#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(const struct page *page)
{
	return page_to_virt(page);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
static inline void *page_address(const struct page *page)
{
	return page->virtual;
}
static inline void set_page_address(struct page *page, void *address)
{
	page->virtual = address;
}
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

extern void *page_rmapping(struct page *page);
extern struct anon_vma *page_anon_vma(struct page *page);
extern struct address_space *page_mapping(struct page *page);

extern struct address_space *__page_file_mapping(struct page *);

static inline
struct address_space *page_file_mapping(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_mapping(page);

	return page->mapping;
}

extern pgoff_t __page_file_index(struct page *page);

static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_index(page);
	return page->index;
}

bool page_mapped(struct page *page);
struct address_space *page_mapping(struct page *page);
struct address_space *page_mapping_file(struct page *page);

static inline bool page_is_pfmemalloc(struct page *page)
{
	return page->index == -1UL;
}

static inline void set_page_pfmemalloc(struct page *page)
{
	page->index = -1UL;
}

static inline void clear_page_pfmemalloc(struct page *page)
{
	page->index = 0;
}

extern void pagefault_out_of_memory(void);

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
#define offset_in_thp(page, p)	((unsigned long)(p) & (thp_size(page) - 1))

#define SHOW_MEM_FILTER_NODES		(0x0001u)

extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);

#ifdef CONFIG_MMU
extern bool can_do_mlock(void);
#else
static inline bool can_do_mlock(void) { return false; }
#endif
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

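/*
 * Parameter block passed down to zap_pte_range in exceptional cases:
 * @check_mapping: check page->mapping if set; @first_index and @last_index
 * bound the lowest and highest page->index to unmap.
 */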
struct zap_details {
	struct address_space *check_mapping;
	pgoff_t	first_index;
	pgoff_t last_index;
};

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			     pte_t pte);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd);

void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		  unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
		    unsigned long size);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long start, unsigned long end);

struct mmu_notifier_range;

void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
		   struct mmu_notifier_range *range,
		   pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write);

extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);

#ifdef CONFIG_MMU
extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
				  unsigned long address, unsigned int flags,
				  struct pt_regs *regs);
extern int fixup_user_fault(struct mm_struct *mm,
			    unsigned long address, unsigned int fault_flags,
			    bool *unlocked);
void unmap_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t nr, bool even_cows);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
#else
static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
					 unsigned long address, unsigned int flags,
					 struct pt_regs *regs)
{
	BUG();
	return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
		unsigned int fault_flags, bool *unlocked)
{
	BUG();
	return -EFAULT;
}
static inline void unmap_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t nr, bool even_cows) { }
static inline void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows) { }
#endif

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
		void *buf, int len, unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, unsigned int gup_flags);
extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, unsigned int gup_flags);

long get_user_pages_remote(struct mm_struct *mm,
			   unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   struct vm_area_struct **vmas, int *locked);
long pin_user_pages_remote(struct mm_struct *mm,
			   unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   struct vm_area_struct **vmas, int *locked);
long get_user_pages(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages,
		    struct vm_area_struct **vmas);
long pin_user_pages(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages,
		    struct vm_area_struct **vmas);
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages, int *locked);
long pin_user_pages_locked(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages, int *locked);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
		    struct page **pages, unsigned int gup_flags);
long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
		    struct page **pages, unsigned int gup_flags);

int get_user_pages_fast(unsigned long start, int nr_pages,
			unsigned int gup_flags, struct page **pages);
int pin_user_pages_fast(unsigned long start, int nr_pages,
			unsigned int gup_flags, struct page **pages);

int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim);

struct frame_vector {
	unsigned int nr_allocated;
	unsigned int nr_frames;
	bool got_ref;
	bool is_pfns;
	void *ptrs[];
};

struct frame_vector *frame_vector_create(unsigned int nr_frames);
void frame_vector_destroy(struct frame_vector *vec);
int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
		     unsigned int gup_flags, struct frame_vector *vec);
void put_vaddr_frames(struct frame_vector *vec);
int frame_vector_to_pages(struct frame_vector *vec);
void frame_vector_to_pfns(struct frame_vector *vec);

static inline unsigned int frame_vector_count(struct frame_vector *vec)
{
	return vec->nr_frames;
}

static inline struct page **frame_vector_pages(struct frame_vector *vec)
{
	if (vec->is_pfns) {
		int err = frame_vector_to_pages(vec);

		if (err)
			return ERR_PTR(err);
	}
	return (struct page **)(vec->ptrs);
}

static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
{
	if (!vec->is_pfns)
		frame_vector_to_pfns(vec);
	return (unsigned long *)(vec->ptrs);
}

struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
			struct page **pages);
int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);

extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned int offset,
			      unsigned int length);

void __set_page_dirty(struct page *, struct address_space *, int warn);
int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_cleaned(struct page *page, struct address_space *mapping,
			  struct bdi_writeback *wb);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
void __cancel_dirty_page(struct page *page);
static inline void cancel_dirty_page(struct page *page)
{
	if (PageDirty(page))
		__cancel_dirty_page(page);
}
int clear_page_dirty_for_io(struct page *page);

int get_cmdline(struct task_struct *task, char *buffer, int buflen);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks);

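/*
 * Flags used by change_protection().  MM_CP_DIRTY_ACCT allows dirty-bit
 * accounting, MM_CP_PROT_NUMA marks a NUMA hinting protection change, and
 * MM_CP_UFFD_WP / MM_CP_UFFD_WP_RESOLVE apply or resolve userfaultfd
 * write protection.
 */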
#define  MM_CP_DIRTY_ACCT			(1UL << 0)
#define  MM_CP_PROT_NUMA			(1UL << 1)
#define  MM_CP_UFFD_WP				(1UL << 2)
#define  MM_CP_UFFD_WP_RESOLVE			(1UL << 3)
#define  MM_CP_UFFD_WP_ALL			(MM_CP_UFFD_WP | \
						 MM_CP_UFFD_WP_RESOLVE)

extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end, pgprot_t newprot,
			      unsigned long cp_flags);
extern int mprotect_fixup(struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

int get_user_pages_fast_only(unsigned long start, int nr_pages,
			     unsigned int gup_flags, struct page **pages);
int pin_user_pages_fast_only(unsigned long start, int nr_pages,
			     unsigned int gup_flags, struct page **pages);

static inline bool get_user_page_fast_only(unsigned long addr,
			unsigned int gup_flags, struct page **pagep)
{
	return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
}

static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = atomic_long_read(&mm->rss_stat.count[member]);

#ifdef SPLIT_RSS_COUNTING
	if (val < 0)
		val = 0;
#endif
	return (unsigned long)val;
}

void mm_trace_rss_stat(struct mm_struct *mm, int member, long count);

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	long count = atomic_long_add_return(value, &mm->rss_stat.count[member]);

	mm_trace_rss_stat(mm, member, count);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	long count = atomic_long_inc_return(&mm->rss_stat.count[member]);

	mm_trace_rss_stat(mm, member, count);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	long count = atomic_long_dec_return(&mm->rss_stat.count[member]);

	mm_trace_rss_stat(mm, member, count);
}

static inline int mm_counter_file(struct page *page)
{
	if (PageSwapBacked(page))
		return MM_SHMEMPAGES;
	return MM_FILEPAGES;
}

static inline int mm_counter(struct page *page)
{
	if (PageAnon(page))
		return MM_ANONPAGES;
	return mm_counter_file(page);
}

static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES) +
		get_mm_counter(mm, MM_SHMEMPAGES);
}

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if ((mm)->hiwater_rss < _rss)
		(mm)->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
{
	mm->hiwater_rss = get_mm_rss(mm);
}

static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

	if (*maxrss < hiwater_rss)
		*maxrss = hiwater_rss;
}

#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
static inline int pte_special(pte_t pte)
{
	return 0;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte;
}
#endif

#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pte_devmap(pte_t pte)
{
	return 0;
}
#endif

int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			       spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
				    spinlock_t **ptl)
{
	pte_t *ptep;
	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
	return ptep;
}

#ifdef __PAGETABLE_P4D_FOLDED
static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
						unsigned long address)
{
	return 0;
}
#else
int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
						unsigned long address)
{
	return 0;
}
static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
static inline void mm_dec_nr_puds(struct mm_struct *mm) {}

#else
int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);

static inline void mm_inc_nr_puds(struct mm_struct *mm)
{
	if (mm_pud_folded(mm))
		return;
	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_puds(struct mm_struct *mm)
{
	if (mm_pud_folded(mm))
		return;
	atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
#endif

#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}

static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}

#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);

static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
	if (mm_pmd_folded(mm))
		return;
	atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
	if (mm_pmd_folded(mm))
		return;
	atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
#endif

#ifdef CONFIG_MMU
static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->pgtables_bytes, 0);
}

static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
	return atomic_long_read(&mm->pgtables_bytes);
}

static inline void mm_inc_nr_ptes(struct mm_struct *mm)
{
	atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_ptes(struct mm_struct *mm)
{
	atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}
#else

static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
	return 0;
}

static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
#endif

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
int __pte_alloc_kernel(pmd_t *pmd);

#if defined(CONFIG_MMU)

static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
		unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
		NULL : p4d_offset(pgd, address);
}

static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
		unsigned long address)
{
	return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
		NULL : pud_offset(p4d, address);
}

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
		NULL: pmd_offset(pud, address);
}
#endif

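/*
 * With USE_SPLIT_PTE_PTLOCKS, the lock protecting a PTE page table is kept
 * in (or, with ALLOC_SPLIT_PTLOCKS, allocated for) the struct page backing
 * that table.  Without it, all page tables of an mm are guarded by the
 * single mm->page_table_lock.
 */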
#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
extern bool ptlock_alloc(struct page *page);
extern void ptlock_free(struct page *page);

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return page->ptl;
}
#else
static inline void ptlock_cache_init(void)
{
}

static inline bool ptlock_alloc(struct page *page)
{
	return true;
}

static inline void ptlock_free(struct page *page)
{
}

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return &page->ptl;
}
#endif

static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_page(*pmd));
}

static inline bool ptlock_init(struct page *page)
{
	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
	if (!ptlock_alloc(page))
		return false;
	spin_lock_init(ptlock_ptr(page));
	return true;
}

#else

static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct page *page) { return true; }
static inline void ptlock_free(struct page *page) {}
#endif

static inline void pgtable_init(void)
{
	ptlock_cache_init();
	pgtable_cache_init();
}

static inline bool pgtable_pte_page_ctor(struct page *page)
{
	if (!ptlock_init(page))
		return false;
	__SetPageTable(page);
	inc_zone_page_state(page, NR_PAGETABLE);
	return true;
}

static inline void pgtable_pte_page_dtor(struct page *page)
{
	ptlock_free(page);
	__ClearPageTable(page);
	dec_zone_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)

#define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))

#define pte_alloc_map(mm, pmd, address)			\
	(pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
	(pte_alloc(mm, pmd) ?				\
		 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)			\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
		NULL: pte_offset_kernel(pmd, address))

#if USE_SPLIT_PMD_PTLOCKS

static struct page *pmd_to_page(pmd_t *pmd)
{
	unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	return virt_to_page((void *)((unsigned long) pmd & mask));
}

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_to_page(pmd));
}

static inline bool pmd_ptlock_init(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	page->pmd_huge_pte = NULL;
#endif
	return ptlock_init(page);
}

static inline void pmd_ptlock_free(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
#endif
	ptlock_free(page);
}

#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)

#else

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}

static inline bool pmd_ptlock_init(struct page *page) { return true; }
static inline void pmd_ptlock_free(struct page *page) {}

#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)

#endif

static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl = pmd_lockptr(mm, pmd);
	spin_lock(ptl);
	return ptl;
}

static inline bool pgtable_pmd_page_ctor(struct page *page)
{
	if (!pmd_ptlock_init(page))
		return false;
	__SetPageTable(page);
	inc_zone_page_state(page, NR_PAGETABLE);
	return true;
}

static inline void pgtable_pmd_page_dtor(struct page *page)
{
	pmd_ptlock_free(page);
	__ClearPageTable(page);
	dec_zone_page_state(page, NR_PAGETABLE);
}

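/*
 * No scalability reason to split PUD locks yet, but follow the same
 * pattern as the PMD locks, to keep things simple.
 */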
2318static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
2319{
2320 return &mm->page_table_lock;
2321}
2322
2323static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
2324{
2325 spinlock_t *ptl = pud_lockptr(mm, pud);
2326
2327 spin_lock(ptl);
2328 return ptl;
2329}
2330
2331extern void __init pagecache_init(void);
2332extern void __init free_area_init_memoryless_node(int nid);
2333extern void free_initmem(void);
2334
2335
2336
2337
2338
2339
2340
2341extern unsigned long free_reserved_area(void *start, void *end,
2342 int poison, const char *s);
2343
2344#ifdef CONFIG_HIGHMEM
2345
2346
2347
2348
2349extern void free_highmem_page(struct page *page);
2350#endif
2351
2352extern void adjust_managed_page_count(struct page *page, long count);
2353extern void mem_init_print_info(const char *str);
2354
2355extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
2356
2357
2358static inline void __free_reserved_page(struct page *page)
2359{
2360 ClearPageReserved(page);
2361 init_page_count(page);
2362 __free_page(page);
2363}

static inline void free_reserved_page(struct page *page)
{
	__free_reserved_page(page);
	adjust_managed_page_count(page, 1);
}

static inline void mark_page_reserved(struct page *page)
{
	SetPageReserved(page);
	adjust_managed_page_count(page, -1);
}

/*
 * Default method to free all the __init memory into the buddy system.
 * The freed pages will be poisoned with pattern "poison" if it's within
 * range [0, UCHAR_MAX].
 * Return pages freed into the buddy system.
 */
static inline unsigned long free_initmem_default(int poison)
{
	extern char __init_begin[], __init_end[];

	return free_reserved_area(&__init_begin, &__init_end,
				  poison, "unused kernel");
}

static inline unsigned long get_num_physpages(void)
{
	int nid;
	unsigned long phys_pages = 0;

	for_each_online_node(nid)
		phys_pages += node_present_pages(nid);

	return phys_pages;
}

/*
 * Using memblock node mappings, an architecture may initialise its
 * zones, allocate the backing mem_map and account for memory holes in an
 * architecture independent manner.
 *
 * An architecture is expected to register range of page frames backed by
 * physical memory with memblock_add[_node]() before calling
 * free_area_init() passing in the PFN each zone ends at. At a basic
 * usage, an architecture is expected to do something like
 *
 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
 * 							 max_highmem_pfn};
 * for_each_valid_physical_page_range()
 *	memblock_add_node(base, size, nid)
 * free_area_init(max_zone_pfns);
 */
void free_area_init(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
						unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
						unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);

#ifndef CONFIG_NEED_MULTIPLE_NODES
static inline int early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
/* there is a per-arch backend function. */
extern int __meminit __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state);
#endif

extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
		enum meminit_context, struct vmem_altmap *, int migratetype);
extern void setup_per_zone_wmarks(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
extern void show_mem(unsigned int flags, nodemask_t *nodemask);
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
#ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
extern unsigned long arch_reserved_kernel_pages(void);
#endif

extern __printf(3, 4)
void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);

extern void setup_per_cpu_pageset(void);

/* page_alloc.c */
extern int min_free_kbytes;
extern int watermark_boost_factor;
extern int watermark_scale_factor;
extern bool arch_has_descending_max_zone_pfns(void);

/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);

/* interval_tree.c */
void vma_interval_tree_insert(struct vm_area_struct *node,
			      struct rb_root_cached *root);
void vma_interval_tree_insert_after(struct vm_area_struct *node,
				    struct vm_area_struct *prev,
				    struct rb_root_cached *root);
void vma_interval_tree_remove(struct vm_area_struct *node,
			      struct rb_root_cached *root);
struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
				unsigned long start, unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
				unsigned long start, unsigned long last);

#define vma_interval_tree_foreach(vma, root, start, last)		\
	for (vma = vma_interval_tree_iter_first(root, start, last);	\
	     vma; vma = vma_interval_tree_iter_next(vma, start, last))
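
/*
 * Illustrative sketch (assumption: "mapping" is a struct address_space
 * whose i_mmap tree is already locked by the caller, and flush_one_vma()
 * is a hypothetical callback): visit every VMA that maps any page index
 * in [pgoff_start, pgoff_end].
 *
 *	struct vm_area_struct *vma;
 *
 *	vma_interval_tree_foreach(vma, &mapping->i_mmap,
 *				  pgoff_start, pgoff_end) {
 *		flush_one_vma(vma);
 *	}
 */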

void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
				   struct rb_root_cached *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
				   struct rb_root_cached *root);
struct anon_vma_chain *
anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
				  unsigned long start, unsigned long last);
struct anon_vma_chain *anon_vma_interval_tree_iter_next(
	struct anon_vma_chain *node, unsigned long start, unsigned long last);
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
#endif

#define anon_vma_interval_tree_foreach(avc, root, start, last) \
	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))

/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
	struct vm_area_struct *expand);
static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
{
	return __vma_adjust(vma, start, end, pgoff, insert, NULL);
}
extern struct vm_area_struct *vma_merge(struct mm_struct *,
	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
	struct mempolicy *, struct vm_userfaultfd_ctx);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
	unsigned long addr, int new_below);
extern int split_vma(struct mm_struct *, struct vm_area_struct *,
	unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
	struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);

static inline int check_data_rlimit(unsigned long rlim,
				    unsigned long new,
				    unsigned long start,
				    unsigned long end_data,
				    unsigned long start_data)
{
	if (rlim < RLIM_INFINITY) {
		if (((new - start) + (end_data - start_data)) > rlim)
			return -ENOSPC;
	}

	return 0;
}
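
/*
 * Illustrative sketch (assumption: called on a brk-style path with
 * mmap_lock held, as sys_brk does): reject growth that would push the
 * data segment past RLIMIT_DATA.
 *
 *	if (check_data_rlimit(rlimit(RLIMIT_DATA), newbrk, mm->start_brk,
 *			      mm->end_data, mm->start_data))
 *		return -ENOSPC;
 */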

extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);

extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);
extern struct file *get_task_exe_file(struct task_struct *task);

extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);

extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
				   const struct vm_special_mapping *sm);
extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags,
				   const struct vm_special_mapping *spec);
/* This is an obsolete alternative to _install_special_mapping. */
extern int install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags, struct page **pages);

unsigned long randomize_stack_top(unsigned long stack_top);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long mmap_region(struct file *file, unsigned long addr,
	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
	struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	unsigned long pgoff, unsigned long *populate, struct list_head *uf);
extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
		       struct list_head *uf, bool downgrade);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
		     struct list_head *uf);
extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);

#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
			 int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
{
	/* Ignore errors */
	(void) __mm_populate(addr, len, 1);
}
#else
static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif

/* These take the mm semaphore themselves */
extern int __must_check vm_brk(unsigned long, unsigned long);
extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
					  unsigned long, unsigned long,
					  unsigned long, unsigned long);

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
};

extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);
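
/*
 * Illustrative sketch (assumption: roughly what an architecture's
 * arch_get_unmapped_area() does, with the usual mm->mmap_base and
 * TASK_SIZE bounds): search bottom-up for a free range of "len" bytes.
 * Set VM_UNMAPPED_AREA_TOPDOWN in flags to search top-down instead.
 * The return value is the found address, or an errno-encoded failure
 * such as -ENOMEM.
 *
 *	struct vm_unmapped_area_info info;
 *
 *	info.flags = 0;
 *	info.length = len;
 *	info.low_limit = mm->mmap_base;
 *	info.high_limit = TASK_SIZE;
 *	info.align_mask = 0;
 *	info.align_offset = 0;
 *	addr = vm_unmapped_area(&info);
 */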

/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
				       loff_t lstart, loff_t lend);
extern void truncate_inode_pages_final(struct address_space *);

/* generic vm_area_ops exported for stackable file systems */
extern vm_fault_t filemap_fault(struct vm_fault *vmf);
extern void filemap_map_pages(struct vm_fault *vmf,
		pgoff_t start_pgoff, pgoff_t end_pgoff);
extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);

/* mm/page-writeback.c */
int __must_check write_one_page(struct page *page);
void task_dirty_inc(struct task_struct *tsk);

extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);

/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
extern int expand_downwards(struct vm_area_struct *vma,
		unsigned long address);
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
#define expand_upwards(vma, address) (0)
#endif

/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr);
extern struct vm_area_struct *find_vma_prev(struct mm_struct *mm, unsigned long addr,
					    struct vm_area_struct **pprev);

/*
 * Look up the first VMA which intersects the interval
 * start_addr..end_addr-1, NULL if none.  Assume start_addr < end_addr.
 */
static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
		unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct *vma = find_vma(mm, start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}
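
/*
 * Illustrative sketch (assumption: mmap_lock is held by the caller, and
 * the -EEXIST errno is this example's choice, not mandated by the API):
 * reject a new range that would overlap an existing mapping.
 *
 *	if (find_vma_intersection(mm, start, start + len))
 *		return -EEXIST;
 */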

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)
			vm_start = 0;
	}
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
				unsigned long vm_start, unsigned long vm_end)
{
	struct vm_area_struct *vma = find_vma(mm, vm_start);

	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
		vma = NULL;

	return vma;
}

static inline bool range_in_vma(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	return (vma && vma->vm_start <= start && end <= vma->vm_end);
}

#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
#else
static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(0);
}
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
#endif

#ifdef CONFIG_NUMA_BALANCING
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
#endif

struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
			struct page **pages, unsigned long *num);
int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
				unsigned long num);
int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
				unsigned long num);
vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			pfn_t pfn);
vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
			pfn_t pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
		unsigned long addr, pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);

static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
				unsigned long addr, struct page *page)
{
	int err = vm_insert_page(vma, addr, page);

	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}
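
/*
 * Illustrative sketch (assumption: a driver ->fault handler backed by a
 * hypothetical pre-allocated "my_dev->page" stored in vm_private_data):
 * insert that page at the faulting address and let the fault code retry.
 *
 *	static vm_fault_t my_dev_fault(struct vm_fault *vmf)
 *	{
 *		struct my_dev *my_dev = vmf->vma->vm_private_data;
 *
 *		return vmf_insert_page(vmf->vma, vmf->address, my_dev->page);
 *	}
 */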

#ifndef io_remap_pfn_range
static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long addr, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
}
#endif

static inline vm_fault_t vmf_error(int err)
{
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags);

#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_TOUCH	0x02	/* mark page accessed */
#define FOLL_GET	0x04	/* do get_page on page */
#define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
#define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
#define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
				 * and return without waiting upon it */
#define FOLL_POPULATE	0x40	/* fault in page */
#define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
#define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
#define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
#define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
#define FOLL_TRIED	0x800	/* a retry, previous pass started an IO */
#define FOLL_MLOCK	0x1000	/* lock present pages */
#define FOLL_REMOTE	0x2000	/* we are working on non-current tsk/mm */
#define FOLL_COW	0x4000	/* internal GUP flag */
#define FOLL_ANON	0x8000	/* don't do file mappings */
#define FOLL_LONGTERM	0x10000	/* mapping lifetime is indefinite: see below */
#define FOLL_SPLIT_PMD	0x20000	/* split huge pmd before returning */
#define FOLL_PIN	0x40000	/* pages must be released via unpin_user_page */
#define FOLL_FAST_ONLY	0x80000	/* gup_fast: prevent fall-back to slow gup */

/*
 * FOLL_PIN and FOLL_GET: two ways of acquiring a reference on a page.
 * The two flags are mutually exclusive.
 *
 * FOLL_PIN is an internal flag: it is set by the pin_user_pages*() APIs
 * and should never be set directly by callers of get_user_pages*().
 * Pages acquired with FOLL_PIN must be released with unpin_user_page(),
 * whereas FOLL_GET references are released with put_page().
 *
 * FOLL_PIN tells the tracking code that the caller intends to access the
 * page's data, possibly via DMA, so the pin must be observable to
 * mechanisms such as page_maybe_dma_pinned().  Callers that hold pages
 * for an indefinite time (for example, RDMA) should additionally set
 * FOLL_LONGTERM.
 *
 * Please see Documentation/core-api/pin_user_pages.rst for details.
 */
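
/*
 * Illustrative sketch (assumptions: pin_user_pages() and
 * unpin_user_pages() are declared elsewhere in this header, "uaddr" is a
 * valid user address, mmap_lock is held, and do_dma() is a hypothetical
 * helper): pin one user page for DMA-style access, then release it.
 *
 *	struct page *page;
 *	long nr = pin_user_pages(uaddr, 1, FOLL_WRITE, &page, NULL);
 *
 *	if (nr != 1)
 *		return nr < 0 ? nr : -EFAULT;
 *	do_dma(page);
 *	unpin_user_pages(&page, 1);
 */
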
static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
{
	if (vm_fault & VM_FAULT_OOM)
		return -ENOMEM;
	if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
		return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
	if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
		return -EFAULT;
	return 0;
}

typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
			       unsigned long size, pte_fn_t fn, void *data);
extern int apply_to_existing_page_range(struct mm_struct *mm,
				   unsigned long address, unsigned long size,
				   pte_fn_t fn, void *data);
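
/*
 * Illustrative sketch (the callback name and behaviour are made up for
 * the example): run a pte_fn_t over a range with
 * apply_to_existing_page_range() to clear every PTE already present.
 * The mm is passed through the opaque "data" cookie.
 *
 *	static int wipe_pte(pte_t *pte, unsigned long addr, void *data)
 *	{
 *		pte_clear(data, addr, pte);
 *		return 0;
 *	}
 *
 *	apply_to_existing_page_range(mm, start, size, wipe_pte, mm);
 */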

#ifdef CONFIG_PAGE_POISONING
extern bool page_poisoning_enabled(void);
extern void kernel_poison_pages(struct page *page, int numpages, int enable);
#else
static inline bool page_poisoning_enabled(void) { return false; }
static inline void kernel_poison_pages(struct page *page, int numpages,
					int enable) { }
#endif

#ifdef CONFIG_INIT_ON_ALLOC_DEFAULT_ON
DECLARE_STATIC_KEY_TRUE(init_on_alloc);
#else
DECLARE_STATIC_KEY_FALSE(init_on_alloc);
#endif
static inline bool want_init_on_alloc(gfp_t flags)
{
	if (static_branch_unlikely(&init_on_alloc) &&
	    !page_poisoning_enabled())
		return true;
	return flags & __GFP_ZERO;
}

#ifdef CONFIG_INIT_ON_FREE_DEFAULT_ON
DECLARE_STATIC_KEY_TRUE(init_on_free);
#else
DECLARE_STATIC_KEY_FALSE(init_on_free);
#endif
static inline bool want_init_on_free(void)
{
	return static_branch_unlikely(&init_on_free) &&
	       !page_poisoning_enabled();
}

#ifdef CONFIG_DEBUG_PAGEALLOC
extern void init_debug_pagealloc(void);
#else
static inline void init_debug_pagealloc(void) {}
#endif
extern bool _debug_pagealloc_enabled_early;
DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);

static inline bool debug_pagealloc_enabled(void)
{
	return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
		_debug_pagealloc_enabled_early;
}

/*
 * For use in fast paths after init_debug_pagealloc() has run, or when a
 * false negative result is not harmful when called too early.
 */
static inline bool debug_pagealloc_enabled_static(void)
{
	if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
		return false;

	return static_branch_unlikely(&_debug_pagealloc_enabled);
}

#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
extern void __kernel_map_pages(struct page *page, int numpages, int enable);

/*
 * Map or unmap pages in the kernel direct mapping, so that stray
 * accesses to free pages can be caught when debug_pagealloc is enabled.
 */
static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
	__kernel_map_pages(page, numpages, enable);
}
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif	/* CONFIG_HIBERNATION */
#else
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
#endif	/* CONFIG_HIBERNATION */
#endif	/* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */

#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
extern int in_gate_area_no_mm(unsigned long addr);
extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
#else
static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}
static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}
#endif	/* __HAVE_ARCH_GATE_AREA */

extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);

#ifdef CONFIG_SYSCTL
extern int sysctl_drop_caches;
int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
#endif

void drop_slab(void);
void drop_slab_node(int nid);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char * arch_vma_name(struct vm_area_struct *vma);
#ifdef CONFIG_MMU
void print_vma_addr(char *prefix, unsigned long rip);
#else
static inline void print_vma_addr(char *prefix, unsigned long rip)
{
}
#endif

void *sparse_buffer_alloc(unsigned long size);
struct page * __populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
			    struct vmem_altmap *altmap);
void *vmemmap_alloc_block(unsigned long size, int node);
struct vmem_altmap;
void *vmemmap_alloc_block_buf(unsigned long size, int node,
			      struct vmem_altmap *altmap);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
			       int node, struct vmem_altmap *altmap);
int vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap);
#endif
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
				  unsigned long nr_pages);

enum mf_flags {
	MF_COUNT_INCREASED = 1 << 0,
	MF_ACTION_REQUIRED = 1 << 1,
	MF_MUST_KILL = 1 << 2,
	MF_SOFT_OFFLINE = 1 << 3,
};
extern int memory_failure(unsigned long pfn, int flags);
extern void memory_failure_queue(unsigned long pfn, int flags);
extern void memory_failure_queue_kick(int cpu);
extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(unsigned long pfn, int flags);

/*
 * Error handlers for various types of pages.
 */
enum mf_result {
	MF_IGNORED,	/* Error: cannot be handled */
	MF_FAILED,	/* Error: handling failed */
	MF_DELAYED,	/* Will be handled later */
	MF_RECOVERED,	/* Successfully recovered */
};

enum mf_action_page_type {
	MF_MSG_KERNEL,
	MF_MSG_KERNEL_HIGH_ORDER,
	MF_MSG_SLAB,
	MF_MSG_DIFFERENT_COMPOUND,
	MF_MSG_POISONED_HUGE,
	MF_MSG_HUGE,
	MF_MSG_FREE_HUGE,
	MF_MSG_NON_PMD_HUGE,
	MF_MSG_UNMAP_FAILED,
	MF_MSG_DIRTY_SWAPCACHE,
	MF_MSG_CLEAN_SWAPCACHE,
	MF_MSG_DIRTY_MLOCKED_LRU,
	MF_MSG_CLEAN_MLOCKED_LRU,
	MF_MSG_DIRTY_UNEVICTABLE_LRU,
	MF_MSG_CLEAN_UNEVICTABLE_LRU,
	MF_MSG_DIRTY_LRU,
	MF_MSG_CLEAN_LRU,
	MF_MSG_TRUNCATED_LRU,
	MF_MSG_BUDDY,
	MF_MSG_BUDDY_2ND,
	MF_MSG_DAX,
	MF_MSG_UNSPLIT_THP,
	MF_MSG_UNKNOWN,
};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
			    unsigned long addr_hint,
			    unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src,
				unsigned long addr_hint,
				struct vm_area_struct *vma,
				unsigned int pages_per_huge_page);
extern long copy_huge_page_from_user(struct page *dst_page,
				const void __user *usr_src,
				unsigned int pages_per_huge_page,
				bool allow_pagefault);

/**
 * vma_is_special_huge - Are transhuge page-table entries considered special?
 * @vma: Pointer to the struct vm_area_struct to consider
 *
 * Whether transhuge page-table entries are considered "special" following
 * the definition in vm_normal_page().
 *
 * Return: true if transhuge page-table entries should be considered special,
 * false otherwise.
 */
static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
{
	return vma_is_dax(vma) || (vma->vm_file &&
				   (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
}

#endif	/* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);

static inline unsigned int debug_guardpage_minorder(void)
{
	return _debug_guardpage_minorder;
}

static inline bool debug_guardpage_enabled(void)
{
	return static_branch_unlikely(&_debug_guardpage_enabled);
}

static inline bool page_is_guard(struct page *page)
{
	if (!debug_guardpage_enabled())
		return false;

	return PageGuard(page);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool debug_guardpage_enabled(void) { return false; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif	/* CONFIG_DEBUG_PAGEALLOC */

#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else
static inline void setup_nr_node_ids(void) {}
#endif

extern int memcmp_pages(struct page *page1, struct page *page2);

static inline int pages_identical(struct page *page1, struct page *page2)
{
	return !memcmp_pages(page1, page2);
}

#ifdef CONFIG_MAPPING_DIRTY_HELPERS
unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
						pgoff_t first_index, pgoff_t nr,
						pgoff_t bitmap_pgoff,
						unsigned long *bitmap,
						pgoff_t *start,
						pgoff_t *end);

unsigned long wp_shared_mapping_range(struct address_space *mapping,
				      pgoff_t first_index, pgoff_t nr);
#endif

extern int sysctl_nr_trim_pages;

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */