/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
#include <linux/page-flags.h>
#include <linux/page_ref.h>
#include <linux/memremap.h>
#include <linux/overflow.h>
#include <linux/sizes.h>
#include <linux/sched.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;
struct bdi_writeback;
struct pt_regs;

extern int sysctl_page_lock_unfairness;

void init_mm_internals(void);

#ifndef CONFIG_NUMA
extern unsigned long max_mapnr;

static inline void set_max_mapnr(unsigned long limit)
{
	max_mapnr = limit;
}
#else
static inline void set_max_mapnr(unsigned long limit) { }
#endif

extern atomic_long_t _totalram_pages;
static inline unsigned long totalram_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalram_pages);
}

static inline void totalram_pages_inc(void)
{
	atomic_long_inc(&_totalram_pages);
}

static inline void totalram_pages_dec(void)
{
	atomic_long_dec(&_totalram_pages);
}

static inline void totalram_pages_add(long count)
{
	atomic_long_add(count, &_totalram_pages);
}

extern void *high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
extern const int mmap_rnd_bits_min;
extern const int mmap_rnd_bits_max;
extern int mmap_rnd_bits __read_mostly;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
extern const int mmap_rnd_compat_bits_min;
extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __read_mostly;
#endif

#include <asm/page.h>
#include <asm/processor.h>

/*
 * Architectures that support memory tagging (assigning tags to memory
 * regions, embedding these tags into addresses that point to those regions,
 * and checking that memory and pointer tags match on memory accesses)
 * redefine this macro to strip tags from pointers.  It is a no-op on
 * architectures without memory tagging.
 */
#ifndef untagged_addr
#define untagged_addr(addr) (addr)
#endif

#ifndef __pa_symbol
#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

#ifndef page_to_virt
#define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
#endif

#ifndef lm_alias
#define lm_alias(x)	__va(__pa_symbol(x))
#endif

/*
 * To prevent common memory management code from establishing a zero page
 * mapping on a processor where that would be problematic, architectures
 * can override this definition.
 */
#ifndef mm_forbids_zeropage
#define mm_forbids_zeropage(X)	(0)
#endif

/*
 * On some architectures it is expensive to call memset() for small sizes.
 * An architecture that implements its own mm_zero_struct_page should wrap
 * the definition below in a #ifndef and provide its own version.
 */
#if BITS_PER_LONG == 64
/*
 * This function must be updated when the size of struct page grows above 80
 * or shrinks below 56 bytes.  The idea is that the compiler optimizes the
 * switch() away and only emits straight-line move/store instructions.
 */
#define	mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
static inline void __mm_zero_struct_page(struct page *page)
{
	unsigned long *_pp = (void *)page;

	/* Check that struct page is either 56, 64, 72, or 80 bytes */
	BUILD_BUG_ON(sizeof(struct page) & 7);
	BUILD_BUG_ON(sizeof(struct page) < 56);
	BUILD_BUG_ON(sizeof(struct page) > 80);

	switch (sizeof(struct page)) {
	case 80:
		_pp[9] = 0;
		fallthrough;
	case 72:
		_pp[8] = 0;
		fallthrough;
	case 64:
		_pp[7] = 0;
		fallthrough;
	case 56:
		_pp[6] = 0;
		_pp[5] = 0;
		_pp[4] = 0;
		_pp[3] = 0;
		_pp[2] = 0;
		_pp[1] = 0;
		_pp[0] = 0;
	}
}
#else
#define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
#endif
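
/*
 * Illustrative note (not part of the original header): on 64-bit,
 * mm_zero_struct_page(page) is semantically equivalent to
 * memset(page, 0, sizeof(struct page)); the open-coded switch with
 * fallthrough merely lets the compiler emit unrolled stores for the
 * handful of legal struct sizes.  A hypothetical memmap-init loop:
 *
 *	for (i = 0; i < nr_pages; i++)
 *		mm_zero_struct_page(pfn_to_page(start_pfn + i));
 */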

/*
 * Default maximum number of active map areas, this limits the number of vmas
 * per mm struct. Users can overwrite this number by sysctl but there is a
 * problem: when a program's coredump is generated as ELF format, a section
 * is created per vma, and the number of sections is stored in an unsigned
 * short.  So the total must stay below 65535, with a margin for the extra
 * informative sections the kernel adds (1-3 today; "5" is used as a safe
 * margin).
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

extern int sysctl_max_map_count;

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;

int overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);

/*
 * Any attempt to mark this function as static leads to build failure
 * when CONFIG_DEBUG_INFO_BTF is enabled because __add_to_page_cache_locked()
 * is referred to by BPF code.  It must stay visible for error injection.
 */
int __add_to_page_cache_locked(struct page *page, struct address_space *mapping,
		pgoff_t index, gfp_t gfp, void **shadowp);

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
#else
#define nth_page(page,n) ((page) + (n))
#endif
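
/*
 * Illustrative note: with classic SPARSEMEM (no vmemmap), the memmap is
 * not virtually contiguous across section boundaries, so stepping through
 * a multi-page range must go via PFNs rather than pointer arithmetic:
 *
 *	struct page *p = nth_page(head, i);	// correct everywhere
 *	struct page *q = head + i;		// only valid with vmemmap/FLATMEM
 */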

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)

#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))

void setup_initial_init_mm(void *start_code, void *end_code,
			   void *end_data, void *brk);

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

struct vm_area_struct *vm_area_alloc(struct mm_struct *);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
void vm_area_free(struct vm_area_struct *);

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 * When changing, update also include/trace/events/mmflags.h.
 */
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_UFFD_WP	0x00001000	/* wrprotect pages tracking */

#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_LOCKONFAULT	0x00080000	/* Lock the pages covered when they are faulted in */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_SYNC		0x00800000	/* Synchronous page faults */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_WIPEONFORK	0x02000000	/* Wipe VMA contents in child */
#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
#else
# define VM_SOFTDIRTY	0
#endif

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
#define VM_HIGH_ARCH_BIT_0	32	/* bits only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_1	33
#define VM_HIGH_ARCH_BIT_2	34
#define VM_HIGH_ARCH_BIT_3	35
#define VM_HIGH_ARCH_BIT_4	36
#define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
#define VM_HIGH_ARCH_4	BIT(VM_HIGH_ARCH_BIT_4)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */

#ifdef CONFIG_ARCH_HAS_PKEYS
# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0	VM_HIGH_ARCH_0
# define VM_PKEY_BIT1	VM_HIGH_ARCH_1
# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
#ifdef CONFIG_PPC
# define VM_PKEY_BIT4	VM_HIGH_ARCH_4
#else
# define VM_PKEY_BIT4	0
#endif
#endif /* CONFIG_ARCH_HAS_PKEYS */

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_SPARC64)
# define VM_SPARC_ADI	VM_ARCH_1	/* Uses ADI tag for access control */
# define VM_ARCH_CLEAR	VM_SPARC_ADI
#elif defined(CONFIG_ARM64)
# define VM_ARM64_BTI	VM_ARCH_1	/* BTI guarded page, a.k.a. GP bit */
# define VM_ARCH_CLEAR	VM_ARM64_BTI
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif

#if defined(CONFIG_ARM64_MTE)
# define VM_MTE		VM_HIGH_ARCH_0	/* Use Tagged memory for access control */
# define VM_MTE_ALLOWED	VM_HIGH_ARCH_1	/* Tagged memory permitted */
#else
# define VM_MTE		VM_NONE
# define VM_MTE_ALLOWED	VM_NONE
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
# define VM_UFFD_MINOR_BIT	37
# define VM_UFFD_MINOR		BIT(VM_UFFD_MINOR_BIT)	/* UFFD minor faults */
#else
# define VM_UFFD_MINOR		VM_NONE
#endif

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)

/* Common data flag combinations */
#define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_FLAGS_NON_EXEC	(VM_READ | VM_WRITE | VM_MAYREAD | \
				 VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_FLAGS_EXEC	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#ifndef VM_DATA_DEFAULT_FLAGS		/* arch can override this */
#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_EXEC
#endif

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS	VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK	VM_GROWSUP
#else
#define VM_STACK	VM_GROWSDOWN
#endif

#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)

/* VMA basic access permission flags */
#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)

/*
 * Special vmas that are non-mergable, non-mlock()able.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

/* This mask prevents VMA from being scanned with khugepaged */
#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)

/* This mask defines which mm->def_flags a process can inherit from its parent */
#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE

/* This mask is used to clear all the VMA flags used by mlock */
#define VM_LOCKED_CLEAR_MASK	(~(VM_LOCKED | VM_LOCKONFAULT))

/* Arch-specific flags to clear when updating VM flags on protection change */
#ifndef VM_ARCH_CLEAR
# define VM_ARCH_CLEAR	VM_NONE
#endif
#define VM_FLAGS_CLEAR	(ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];

/**
 * enum fault_flag - Fault flag definitions.
 * @FAULT_FLAG_WRITE: Fault was a write fault.
 * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
 * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
 * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying.
 * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
 * @FAULT_FLAG_TRIED: The fault has been tried once.
 * @FAULT_FLAG_USER: The fault originated in userspace.
 * @FAULT_FLAG_REMOTE: The fault is not for current task/mm.
 * @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch.
 * @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals.
 *
 * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: ALLOW_RETRY means
 * the fault handler may drop the mmap_lock and retry, while TRIED means
 * one retry has already happened and the handler should not release the
 * lock again.
 */
enum fault_flag {
	FAULT_FLAG_WRITE =		1 << 0,
	FAULT_FLAG_MKWRITE =		1 << 1,
	FAULT_FLAG_ALLOW_RETRY =	1 << 2,
	FAULT_FLAG_RETRY_NOWAIT =	1 << 3,
	FAULT_FLAG_KILLABLE =		1 << 4,
	FAULT_FLAG_TRIED =		1 << 5,
	FAULT_FLAG_USER =		1 << 6,
	FAULT_FLAG_REMOTE =		1 << 7,
	FAULT_FLAG_INSTRUCTION =	1 << 8,
	FAULT_FLAG_INTERRUPTIBLE =	1 << 9,
};

/*
 * The default fault flags that should be used by most of the
 * arch-specific page fault handlers.
 */
#define FAULT_FLAG_DEFAULT  (FAULT_FLAG_ALLOW_RETRY | \
			     FAULT_FLAG_KILLABLE | \
			     FAULT_FLAG_INTERRUPTIBLE)

/**
 * fault_flag_allow_retry_first - check ALLOW_RETRY the first time
 * @flags: Fault flags.
 *
 * This is mostly used for places where we want to try to avoid taking
 * the mmap_lock for too long a time when waiting for another condition
 * to change, in which case we can try to be polite to release the
 * mmap_lock in the first round to avoid potential starvation of other
 * processes that would also want the mmap_lock.
 *
 * Return: true if the page fault allows retry and this is the first
 * attempt of the fault handling; false otherwise.
 */
static inline bool fault_flag_allow_retry_first(enum fault_flag flags)
{
	return (flags & FAULT_FLAG_ALLOW_RETRY) &&
	    (!(flags & FAULT_FLAG_TRIED));
}
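
/*
 * Illustrative sketch (not from this file): a fault path that releases
 * the mmap_lock on the first retryable attempt might look like
 *
 *	if (fault_flag_allow_retry_first(vmf->flags) &&
 *	    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
 *		mmap_read_unlock(vmf->vma->vm_mm);
 *		...wait for the page to become available...
 *		return VM_FAULT_RETRY;
 *	}
 */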

#define FAULT_FLAG_TRACE \
	{ FAULT_FLAG_WRITE,		"WRITE" }, \
	{ FAULT_FLAG_MKWRITE,		"MKWRITE" }, \
	{ FAULT_FLAG_ALLOW_RETRY,	"ALLOW_RETRY" }, \
	{ FAULT_FLAG_RETRY_NOWAIT,	"RETRY_NOWAIT" }, \
	{ FAULT_FLAG_KILLABLE,		"KILLABLE" }, \
	{ FAULT_FLAG_TRIED,		"TRIED" }, \
	{ FAULT_FLAG_USER,		"USER" }, \
	{ FAULT_FLAG_REMOTE,		"REMOTE" }, \
	{ FAULT_FLAG_INSTRUCTION,	"INSTRUCTION" }, \
	{ FAULT_FLAG_INTERRUPTIBLE,	"INTERRUPTIBLE" }

/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * MM layer fills up gfp_mask for page allocations but fault handler might
 * alter it if its implementation requires a different allocation context.
 *
 * pgoff should be used in favour of virtual_address, if possible.
 */
struct vm_fault {
	const struct {
		struct vm_area_struct *vma;	/* Target VMA */
		gfp_t gfp_mask;			/* gfp mask to be used for allocations */
		pgoff_t pgoff;			/* Logical page offset based on vma */
		unsigned long address;		/* Faulting virtual address */
	};
	enum fault_flag flags;		/* FAULT_FLAG_xxx flags */

	pmd_t *pmd;			/* Pointer to pmd entry matching
					 * the 'address' */
	pud_t *pud;			/* Pointer to pud entry matching
					 * the 'address'
					 */
	union {
		pte_t orig_pte;		/* Value of PTE at the time of fault */
		pmd_t orig_pmd;		/* Value of PMD at the time of fault,
					 * used by PMD fault only.
					 */
	};

	struct page *cow_page;		/* Page handler may use for COW fault */
	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
	/* These three entries are valid only while holding ptl lock */
	pte_t *pte;			/* Pointer to pte entry matching
					 * the 'address'. NULL if the page
					 * table hasn't been allocated.
					 */
	spinlock_t *ptl;		/* Page table lock.
					 * Protects pte page table if 'pte'
					 * is not NULL, otherwise pmd.
					 */
	pgtable_t prealloc_pte;		/* Pre-allocated pte page table.
					 * vm_ops->map_pages() sets up a page
					 * table from atomic context;
					 * do_fault_around() pre-allocates the
					 * page table to avoid allocating from
					 * atomic context.
					 */
};

/* page entry size for vm->huge_fault() */
enum page_entry_size {
	PE_SIZE_PTE = 0,
	PE_SIZE_PMD,
	PE_SIZE_PUD,
};

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	/* Called any time before splitting to check if it's allowed */
	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
	int (*mremap)(struct vm_area_struct *area);
	/*
	 * Called by mprotect() to make driver-specific permission
	 * checks before mprotect() is finalised.  The VMA must not
	 * be shrunk.
	 */
	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, unsigned long newflags);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf,
			enum page_entry_size pe_size);
	vm_fault_t (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct * area);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs.  See also generic_access_phys() for a
	 * generic implementation useful for any iomem mapping.
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c handles that; get_policy() must NOT add a ref
	 * if the policy at (vma,addr) is not marked as MPOL_SHARED.
	 * vma policies are protected by the mmap_lock.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
#endif
	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr.  This is useful if the default behavior
	 * (using pte_page()) would not find the correct page.
	 */
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};

static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	static const struct vm_operations_struct dummy_vm_ops = {};

	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	vma->vm_ops = &dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
}

static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
	vma->vm_ops = NULL;
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
{
	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

	if (!maybe_stack)
		return false;

	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
						VM_STACK_INCOMPLETE_SETUP)
		return true;

	return false;
}

static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
	if (!current->mm)
		return true;

	if (current->mm != vma->vm_mm)
		return true;

	return false;
}

static inline bool vma_is_accessible(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_ACCESS_FLAGS;
}

#ifdef CONFIG_SHMEM
/*
 * The vma_is_shmem is not inline because it is used only by slow
 * paths in userfault.
 */
bool vma_is_shmem(struct vm_area_struct *vma);
#else
static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
#endif

int vma_is_stack_for_current(struct vm_area_struct *vma);

/* flush_tlb_range() takes a vma, not a mm, and can care about flags */
#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }

struct mmu_gather;
struct inode;

#include <linux/huge_mm.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
	return page_ref_dec_and_test(page);
}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 * This can be called when MMU is off so it must not access
 * any of the virtual mappings.
 */
static inline int get_page_unless_zero(struct page *page)
{
	return page_ref_add_unless(page, 1, 0);
}
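
/*
 * Illustrative only (hypothetical caller): a speculative-reference pattern
 * built on these helpers:
 *
 *	if (!get_page_unless_zero(page))
 *		return NULL;	// page was already on its way to being freed
 *	...use the page...
 *	put_page(page);		// defined below; pairs with the ref taken here
 */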

extern int page_is_ram(unsigned long pfn);

enum {
	REGION_INTERSECTS,
	REGION_DISJOINT,
	REGION_MIXED,
};

int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
		      unsigned long desc);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
#ifndef is_ioremap_addr
#define is_ioremap_addr(x) is_vmalloc_addr(x)
#endif

#ifdef CONFIG_MMU
extern bool is_vmalloc_addr(const void *x);
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline bool is_vmalloc_addr(const void *x)
{
	return false;
}
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
static inline void *kvmalloc(size_t size, gfp_t flags)
{
	return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
	return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
static inline void *kvzalloc(size_t size, gfp_t flags)
{
	return kvmalloc(size, flags | __GFP_ZERO);
}

static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return kvmalloc(bytes, flags);
}

static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
{
	return kvmalloc_array(n, size, flags | __GFP_ZERO);
}
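
/*
 * Illustrative only: kvmalloc_array()/kvcalloc() guard against n * size
 * overflow and kvmalloc() falls back from kmalloc to vmalloc for large
 * sizes.  A hypothetical caller ("struct foo" and "nr_entries" are not
 * from this file):
 *
 *	struct foo *tbl = kvcalloc(nr_entries, sizeof(*tbl), GFP_KERNEL);
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	kvfree(tbl);
 */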

extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize,
		gfp_t flags);
extern void kvfree(const void *addr);
extern void kvfree_sensitive(const void *addr, size_t len);

static inline int head_compound_mapcount(struct page *head)
{
	return atomic_read(compound_mapcount_ptr(head)) + 1;
}

/*
 * Mapcount of compound page as a whole, does not include mapped sub-pages.
 *
 * Must be called only for compound pages or any their tail sub-pages.
 */
static inline int compound_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(!PageCompound(page), page);
	page = compound_head(page);
	return head_compound_mapcount(page);
}

/*
 * The atomic page->_mapcount, starts from -1: so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */
static inline void page_mapcount_reset(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

int __page_mapcount(struct page *page);

/*
 * Mapcount of 0-order page; when compound sub-page, includes
 * compound_mapcount().
 *
 * Result is undefined for pages which cannot be mapped into userspace.
 * For example SLAB or special types of pages. See function page_mapped().
 * They use this field in struct page differently.
 */
static inline int page_mapcount(struct page *page)
{
	if (unlikely(PageCompound(page)))
		return __page_mapcount(page);
	return atomic_read(&page->_mapcount) + 1;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int total_mapcount(struct page *page);
int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
#else
static inline int total_mapcount(struct page *page)
{
	return page_mapcount(page);
}
static inline int page_trans_huge_mapcount(struct page *page,
					   int *total_mapcount)
{
	int mapcount = page_mapcount(page);
	if (total_mapcount)
		*total_mapcount = mapcount;
	return mapcount;
}
#endif

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);

	return compound_head(page);
}

void __put_page(struct page *page);

void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);
void copy_huge_page(struct page *dst, struct page *src);

/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a compound page.
 */
typedef void compound_page_dtor(struct page *);

/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
enum compound_dtor_id {
	NULL_COMPOUND_DTOR,
	COMPOUND_PAGE_DTOR,
#ifdef CONFIG_HUGETLB_PAGE
	HUGETLB_PAGE_DTOR,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	TRANSHUGE_PAGE_DTOR,
#endif
	NR_COMPOUND_DTORS,
};
extern compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS];

static inline void set_compound_page_dtor(struct page *page,
		enum compound_dtor_id compound_dtor)
{
	VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
	page[1].compound_dtor = compound_dtor;
}

static inline void destroy_compound_page(struct page *page)
{
	VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
	compound_page_dtors[page[1].compound_dtor](page);
}

static inline unsigned int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return page[1].compound_order;
}

static inline bool hpage_pincount_available(struct page *page)
{
	/*
	 * Can the page->hpage_pinned_refcount field be used? That field is in
	 * the 3rd page of the compound page, so the smallest (2-page) compound
	 * pages cannot support it.
	 */
	page = compound_head(page);
	return PageCompound(page) && compound_order(page) > 1;
}

static inline int head_compound_pincount(struct page *head)
{
	return atomic_read(compound_pincount_ptr(head));
}

static inline int compound_pincount(struct page *page)
{
	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
	page = compound_head(page);
	return head_compound_pincount(page);
}

static inline void set_compound_order(struct page *page, unsigned int order)
{
	page[1].compound_order = order;
	page[1].compound_nr = 1U << order;
}

/* Returns the number of pages in this potentially compound page. */
static inline unsigned long compound_nr(struct page *page)
{
	if (!PageHead(page))
		return 1;
	return page[1].compound_nr;
}

/* Returns the number of bytes in this potentially compound page. */
static inline unsigned long page_size(struct page *page)
{
	return PAGE_SIZE << compound_order(page);
}

/* Returns the number of bits needed for the number of bytes in a page */
static inline unsigned int page_shift(struct page *page)
{
	return PAGE_SHIFT + compound_order(page);
}

void free_compound_page(struct page *page);

#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case, we do always want
 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}
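
/*
 * Illustrative only (hypothetical COW-style fault path): the new PTE is
 * typically built so the write bit is set only when the VMA permits it:
 *
 *	entry = mk_pte(new_page, vma->vm_page_prot);
 *	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 */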

vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr);

vm_fault_t finish_fault(struct vm_fault *vmf);
vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#endif

/*
 * The zone, node and section that a page belongs to are encoded at the top
 * of page->flags.  From most to least significant bits the layout is:
 *
 *	| [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | [KASAN_TAG] | ... | FLAGS |
 *
 * Bracketed fields exist only in some configurations: classic sparsemem
 * keeps the section number here, while the node or last cpupid may live
 * outside page->flags entirely (see NODE_NOT_IN_PAGE_FLAGS and
 * LAST_CPUPID_NOT_IN_PAGE_FLAGS below).  The zone field is never updated
 * after free_area_init_core() sets it, so none of the operations on it
 * need to be atomic.
 */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)
#define KASAN_TAG_PGOFF		(LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
#define KASAN_TAG_PGSHIFT	(KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
#define KASAN_TAG_MASK		((1UL << KASAN_TAG_WIDTH) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT);
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_zone_device_page(const struct page *page)
{
	return page_zonenum(page) == ZONE_DEVICE;
}
extern void memmap_init_zone_device(struct zone *, unsigned long,
				    unsigned long, struct dev_pagemap *);
#else
static inline bool is_zone_device_page(const struct page *page)
{
	return false;
}
#endif

static inline bool is_zone_movable_page(const struct page *page)
{
	return page_zonenum(page) == ZONE_MOVABLE;
}

#ifdef CONFIG_DEV_PAGEMAP_OPS
void free_devmap_managed_page(struct page *page);
DECLARE_STATIC_KEY_FALSE(devmap_managed_key);

static inline bool page_is_devmap_managed(struct page *page)
{
	if (!static_branch_unlikely(&devmap_managed_key))
		return false;
	if (!is_zone_device_page(page))
		return false;
	switch (page->pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
	case MEMORY_DEVICE_FS_DAX:
		return true;
	default:
		break;
	}
	return false;
}

void put_devmap_managed_page(struct page *page);

#else /* CONFIG_DEV_PAGEMAP_OPS */
static inline bool page_is_devmap_managed(struct page *page)
{
	return false;
}

static inline void put_devmap_managed_page(struct page *page)
{
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */

static inline bool is_device_private_page(const struct page *page)
{
	return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
		IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
		is_zone_device_page(page) &&
		page->pgmap->type == MEMORY_DEVICE_PRIVATE;
}

static inline bool is_pci_p2pdma_page(const struct page *page)
{
	return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
		IS_ENABLED(CONFIG_PCI_P2PDMA) &&
		is_zone_device_page(page) &&
		page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
}

/* 127: arbitrary random number, small enough to assemble well */
#define page_ref_zero_or_close_to_overflow(page) \
	((unsigned int) page_ref_count(page) + 127u <= 127u)

static inline void get_page(struct page *page)
{
	page = compound_head(page);
	/*
	 * Getting a normal page or the head of a compound page
	 * requires to already have an elevated page->_refcount.
	 */
	VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
	page_ref_inc(page);
}

bool __must_check try_grab_page(struct page *page, unsigned int flags);
struct page *try_grab_compound_head(struct page *page, int refs,
				    unsigned int flags);

/* Returns true on success, false if the page was already released */
static inline __must_check bool try_get_page(struct page *page)
{
	page = compound_head(page);
	if (WARN_ON_ONCE(page_ref_count(page) <= 0))
		return false;
	page_ref_inc(page);
	return true;
}

static inline void put_page(struct page *page)
{
	page = compound_head(page);

	/*
	 * For devmap managed pages we need to catch refcount transition
	 * from 2 to 1, when refcount reaches one it means the page is free
	 * and we need to inform the device driver through a callback. See
	 * include/linux/memremap.h and HMM for details.
	 */
	if (page_is_devmap_managed(page)) {
		put_devmap_managed_page(page);
		return;
	}

	if (put_page_testzero(page))
		__put_page(page);
}
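
/*
 * Illustrative only: every successful get_page()/try_get_page() must be
 * balanced by exactly one put_page(), e.g. (hypothetical caller):
 *
 *	get_page(page);
 *	...hand the page to an I/O path...
 *	put_page(page);	// may free the page if this was the last reference
 */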

/*
 * GUP_PIN_COUNTING_BIAS, and the associated functions that use it, overload
 * page->_refcount: pin_user_pages*() (FOLL_PIN) adds GUP_PIN_COUNTING_BIAS
 * to the refcount instead of 1, so that dma-pinned pages can be told apart
 * from pages that merely have an elevated refcount.  Huge pages for which
 * hpage_pincount_available() is true instead track pins exactly, in the
 * hpage_pinned_refcount field of the second tail page.
 *
 * Because the bias scheme is approximate, page_maybe_dma_pinned() can
 * return false positives for a page that has around GUP_PIN_COUNTING_BIAS
 * normal references; callers must tolerate that.
 */
#define GUP_PIN_COUNTING_BIAS (1U << 10)

void unpin_user_page(struct page *page);
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
				 bool make_dirty);
void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
				      bool make_dirty);
void unpin_user_pages(struct page **pages, unsigned long npages);

/**
 * page_maybe_dma_pinned - Report if a page is pinned for DMA.
 * @page: The page.
 *
 * This function checks if a page has been pinned via a call to
 * a function in the pin_user_pages() family.
 *
 * For non-huge pages, the return value is partially fuzzy: false is not
 * fuzzy, because it means "definitely not pinned for DMA", but true means
 * "probably pinned for DMA, but possibly a false positive due to having at
 * least GUP_PIN_COUNTING_BIAS worth of normal page references".
 *
 * Return: True, if it is likely that the page has been "dma-pinned".
 * False, if the page is definitely not dma-pinned.
 */
static inline bool page_maybe_dma_pinned(struct page *page)
{
	if (hpage_pincount_available(page))
		return compound_pincount(page) > 0;

	/*
	 * page_ref_count() is signed. If that refcount overflows, then
	 * page_ref_count() returns a negative value, and callers will avoid
	 * further incrementing the refcount.
	 *
	 * Here, for that overflow case, use the signed bit to count a little
	 * bit higher via unsigned math, and thus still get an accurate result.
	 */
	return ((unsigned int)page_ref_count(compound_head(page))) >=
		GUP_PIN_COUNTING_BIAS;
}
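
/*
 * Illustrative arithmetic (not from the original header): one
 * pin_user_pages() call on a base page moves _refcount from 1 to
 * 1 + GUP_PIN_COUNTING_BIAS = 1025, while a plain get_page() moves it
 * from 1 to 2; page_maybe_dma_pinned() simply tests
 * _refcount >= GUP_PIN_COUNTING_BIAS.
 */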

static inline bool is_cow_mapping(vm_flags_t flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

/*
 * This should most likely only be called during fork() to see whether we
 * should break the cow immediately for a page on the src mm.
 */
static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
					  struct page *page)
{
	if (!is_cow_mapping(vma->vm_flags))
		return false;

	if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
		return false;

	return page_maybe_dma_pinned(page);
}

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif

/*
 * The identification function is mainly used by the buddy allocator for
 * determining if two pages could be buddies. We are not really identifying
 * the zone since we could be using the section number id if we do not have
 * node id available in page flags.
 * We only guarantee that it will return the same value for two combinable
 * pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	struct page *p = (struct page *)page;

	return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

static inline int cpupid_to_pid(int cpupid)
{
	return cpupid & LAST__PID_MASK;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}

static inline int cpupid_to_nid(int cpupid)
{
	return cpu_to_node(cpupid_to_cpu(cpupid));
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}

static inline bool cpupid_cpu_unset(int cpupid)
{
	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
}

static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
{
	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
}

#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}

static inline int page_cpupid_last(struct page *page)
{
	return page->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int page_cpupid_last(struct page *page)
{
	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}

extern int page_cpupid_xchg_last(struct page *page, int cpupid);

static inline void page_cpupid_reset_last(struct page *page)
{
	page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
#else /* !CONFIG_NUMA_BALANCING */
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return page_to_nid(page);
}

static inline int page_cpupid_last(struct page *page)
{
	return page_to_nid(page);
}

static inline int cpupid_to_nid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_pid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return -1;
}

static inline int cpu_pid_to_cpupid(int nid, int pid)
{
	return -1;
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return true;
}

static inline void page_cpupid_reset_last(struct page *page)
{
}

static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

/*
 * KASAN per-page tags are stored xor'ed with 0xff. This allows to avoid
 * setting tags for most pages in mm_init() and get the default tag (0xff)
 * for pages that don't have tags set via page_kasan_tag_set().
 */
static inline u8 page_kasan_tag(const struct page *page)
{
	u8 tag = 0xff;

	if (kasan_enabled()) {
		tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
		tag ^= 0xff;
	}

	return tag;
}

static inline void page_kasan_tag_set(struct page *page, u8 tag)
{
	if (kasan_enabled()) {
		tag ^= 0xff;
		page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
		page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
	}
}

static inline void page_kasan_tag_reset(struct page *page)
{
	if (kasan_enabled())
		page_kasan_tag_set(page, 0xff);
}

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline u8 page_kasan_tag(const struct page *page)
{
	return 0xff;
}

static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
static inline void page_kasan_tag_reset(struct page *page) { }

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

static inline pg_data_t *page_pgdat(const struct page *page)
{
	return NODE_DATA(page_to_nid(page));
}

#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

/* MIGRATE_CMA and ZONE_MOVABLE do not allow pin pages */
#ifdef CONFIG_MIGRATION
static inline bool is_pinnable_page(struct page *page)
{
	return !(is_zone_movable_page(page) || is_migrate_cma_page(page)) ||
		is_zero_pfn(page_to_pfn(page));
}
#else
static inline bool is_pinnable_page(struct page *page)
{
	return true;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#ifdef SECTION_IN_PAGE_FLAGS
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(const struct page *page)
{
	return page_to_virt(page);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
static inline void *page_address(const struct page *page)
{
	return page->virtual;
}
static inline void set_page_address(struct page *page, void *address)
{
	page->virtual = address;
}
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

extern void *page_rmapping(struct page *page);
extern struct anon_vma *page_anon_vma(struct page *page);
extern struct address_space *page_mapping(struct page *page);

extern struct address_space *__page_file_mapping(struct page *);

static inline
struct address_space *page_file_mapping(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_mapping(page);

	return page->mapping;
}

extern pgoff_t __page_file_index(struct page *page);

/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use swp_offset(->private)
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_index(page);
	return page->index;
}

bool page_mapped(struct page *page);

/*
 * Return true only if the page has been allocated with
 * ALLOC_NO_WATERMARKS and the low watermark was not
 * met, implying that the system is under some pressure.
 */
static inline bool page_is_pfmemalloc(const struct page *page)
{
	/*
	 * lru.next has bit 1 set if the page is allocated from the
	 * pfmemalloc reserves.  Callers may simply overwrite it if
	 * they do not need to preserve that information.
	 */
	return (uintptr_t)page->lru.next & BIT(1);
}

/*
 * Only to be called by the page allocator on a freshly allocated
 * page.
 */
static inline void set_page_pfmemalloc(struct page *page)
{
	page->lru.next = (void *)BIT(1);
}

static inline void clear_page_pfmemalloc(struct page *page)
{
	page->lru.next = NULL;
}

/*
 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
 */
extern void pagefault_out_of_memory(void);

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
#define offset_in_thp(page, p)	((unsigned long)(p) & (thp_size(page) - 1))

/*
 * Flags passed to show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);

#ifdef CONFIG_MMU
extern bool can_do_mlock(void);
#else
static inline bool can_do_mlock(void) { return false; }
#endif
extern int user_shm_lock(size_t, struct ucounts *);
extern void user_shm_unlock(size_t, struct ucounts *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct address_space *check_mapping;	/* Check page->mapping if set */
	pgoff_t	first_index;			/* Lowest page->index to unmap */
	pgoff_t last_index;			/* Highest page->index to unmap */
	struct page *single_page;		/* Locked page to be unmapped */
};

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			     pte_t pte);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd);

void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		  unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
		    unsigned long size);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long start, unsigned long end);

struct mmu_notifier_range;

void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
int follow_invalidate_pte(struct mm_struct *mm, unsigned long address,
			  struct mmu_notifier_range *range, pte_t **ptepp,
			  pmd_t **pmdpp, spinlock_t **ptlp);
int follow_pte(struct mm_struct *mm, unsigned long address,
	       pte_t **ptepp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write);

extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);

#ifdef CONFIG_MMU
extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
				  unsigned long address, unsigned int flags,
				  struct pt_regs *regs);
extern int fixup_user_fault(struct mm_struct *mm,
			    unsigned long address, unsigned int fault_flags,
			    bool *unlocked);
void unmap_mapping_page(struct page *page);
void unmap_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t nr, bool even_cows);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
#else
static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
					 unsigned long address, unsigned int flags,
					 struct pt_regs *regs)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
		unsigned int fault_flags, bool *unlocked)
{
	/* should never happen if there's no MMU */
	BUG();
	return -EFAULT;
}
static inline void unmap_mapping_page(struct page *page) { }
static inline void unmap_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t nr, bool even_cows) { }
static inline void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows) { }
#endif

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
		void *buf, int len, unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, unsigned int gup_flags);
extern int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
			      void *buf, int len, unsigned int gup_flags);

long get_user_pages_remote(struct mm_struct *mm,
			   unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   struct vm_area_struct **vmas, int *locked);
long pin_user_pages_remote(struct mm_struct *mm,
			   unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   struct vm_area_struct **vmas, int *locked);
long get_user_pages(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages,
		    struct vm_area_struct **vmas);
long pin_user_pages(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages,
		    struct vm_area_struct **vmas);
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages, int *locked);
long pin_user_pages_locked(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages, int *locked);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
		    struct page **pages, unsigned int gup_flags);
long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
		    struct page **pages, unsigned int gup_flags);

int get_user_pages_fast(unsigned long start, int nr_pages,
			unsigned int gup_flags, struct page **pages);
int pin_user_pages_fast(unsigned long start, int nr_pages,
			unsigned int gup_flags, struct page **pages);
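
/*
 * Illustrative only (hypothetical caller): the _fast variants walk the
 * page tables without taking mmap_lock and fall back internally if needed.
 * The FOLL_PIN API pairs pin_user_pages*() with unpin_user_page(s)(),
 * never with put_page():
 *
 *	struct page *page;
 *	int got = pin_user_pages_fast(uaddr, 1, FOLL_WRITE, &page);
 *	if (got != 1)
 *		return got < 0 ? got : -EFAULT;
 *	...DMA to/from the page...
 *	unpin_user_pages_dirty_lock(&page, 1, true);
 */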

int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim);

struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
			struct page **pages);
struct page *get_dump_page(unsigned long addr);

extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned int offset,
			      unsigned int length);

int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
void account_page_cleaned(struct page *page, struct address_space *mapping,
			  struct bdi_writeback *wb);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
void __cancel_dirty_page(struct page *page);
static inline void cancel_dirty_page(struct page *page)
{
	/* Avoid atomic ops, locking, etc. when not actually needed. */
	if (PageDirty(page))
		__cancel_dirty_page(page);
}
int clear_page_dirty_for_io(struct page *page);

int get_cmdline(struct task_struct *task, char *buffer, int buflen);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks);

/*
 * Flags used by change_protection().  For now we make it a bitmap so
 * that we can pass in multiple flags just like parameters.  However
 * for now all callers use only one of the flags at a time.
 */
/* Whether we should manage dirty bit accounting */
#define  MM_CP_DIRTY_ACCT                  (1UL << 0)
/* Whether this protection change is for NUMA hints */
#define  MM_CP_PROT_NUMA                   (1UL << 1)
/* Whether this change is for write protecting */
#define  MM_CP_UFFD_WP                     (1UL << 2) /* do wp */
#define  MM_CP_UFFD_WP_RESOLVE             (1UL << 3) /* Resolve wp */
#define  MM_CP_UFFD_WP_ALL                 (MM_CP_UFFD_WP | \
					    MM_CP_UFFD_WP_RESOLVE)

extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end, pgprot_t newprot,
			      unsigned long cp_flags);
extern int mprotect_fixup(struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

/*
 * doesn't attempt to fault and will return short.
 */
int get_user_pages_fast_only(unsigned long start, int nr_pages,
			     unsigned int gup_flags, struct page **pages);
int pin_user_pages_fast_only(unsigned long start, int nr_pages,
			     unsigned int gup_flags, struct page **pages);

static inline bool get_user_page_fast_only(unsigned long addr,
			unsigned int gup_flags, struct page **pagep)
{
	return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
}

/*
 * per-process(per-mm_struct) statistics.
 */
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = atomic_long_read(&mm->rss_stat.count[member]);

#ifdef SPLIT_RSS_COUNTING
	/*
	 * The counter is updated asynchronously and may temporarily go
	 * negative; users should never see a negative value.
	 */
	if (val < 0)
		val = 0;
#endif
	return (unsigned long)val;
}

void mm_trace_rss_stat(struct mm_struct *mm, int member, long count);

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	long count = atomic_long_add_return(value, &mm->rss_stat.count[member]);

	mm_trace_rss_stat(mm, member, count);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	long count = atomic_long_inc_return(&mm->rss_stat.count[member]);

	mm_trace_rss_stat(mm, member, count);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	long count = atomic_long_dec_return(&mm->rss_stat.count[member]);

	mm_trace_rss_stat(mm, member, count);
}

/* Optimized variant when page is already known not to be PageAnon */
static inline int mm_counter_file(struct page *page)
{
	if (PageSwapBacked(page))
		return MM_SHMEMPAGES;
	return MM_FILEPAGES;
}

static inline int mm_counter(struct page *page)
{
	if (PageAnon(page))
		return MM_ANONPAGES;
	return mm_counter_file(page);
}

static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES) +
		get_mm_counter(mm, MM_SHMEMPAGES);
}
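
/*
 * Illustrative only: get_mm_rss() returns the resident set size in pages
 * (it is what /proc/<pid>/status reports as VmRSS).  A hypothetical
 * pretty-printer:
 *
 *	pr_info("rss: %lu kB\n", get_mm_rss(mm) << (PAGE_SHIFT - 10));
 */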

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if ((mm)->hiwater_rss < _rss)
		(mm)->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
{
	mm->hiwater_rss = get_mm_rss(mm);
}

static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

	if (*maxrss < hiwater_rss)
		*maxrss = hiwater_rss;
}

#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
static inline int pte_special(pte_t pte)
{
	return 0;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte;
}
#endif

#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pte_devmap(pte_t pte)
{
	return 0;
}
#endif

int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			       spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
				    spinlock_t **ptl)
{
	pte_t *ptep;
	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
	return ptep;
}

#ifdef __PAGETABLE_P4D_FOLDED
static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
						unsigned long address)
{
	return 0;
}
#else
int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
						unsigned long address)
{
	return 0;
}
static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
static inline void mm_dec_nr_puds(struct mm_struct *mm) {}

#else
int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);

static inline void mm_inc_nr_puds(struct mm_struct *mm)
{
	if (mm_pud_folded(mm))
		return;
	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_puds(struct mm_struct *mm)
{
	if (mm_pud_folded(mm))
		return;
	atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
#endif

#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}

static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}

#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);

static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
	if (mm_pmd_folded(mm))
		return;
	atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
	if (mm_pmd_folded(mm))
		return;
	atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
#endif

#ifdef CONFIG_MMU
static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->pgtables_bytes, 0);
}

static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
	return atomic_long_read(&mm->pgtables_bytes);
}

static inline void mm_inc_nr_ptes(struct mm_struct *mm)
{
	atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_ptes(struct mm_struct *mm)
{
	atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}
#else

static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
	return 0;
}

static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
#endif

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
int __pte_alloc_kernel(pmd_t *pmd);

#if defined(CONFIG_MMU)

static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
		unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
		NULL : p4d_offset(pgd, address);
}

static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
		unsigned long address)
{
	return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
		NULL : pud_offset(p4d, address);
}

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
		NULL: pmd_offset(pud, address);
}
#endif /* CONFIG_MMU */
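
/*
 * Illustrative only (hypothetical helper): allocating the intermediate
 * levels down to a PMD for a given address chains the *_alloc() helpers,
 * each returning NULL on allocation failure:
 *
 *	pgd = pgd_offset(mm, addr);
 *	p4d = p4d_alloc(mm, pgd, addr);
 *	pud = p4d ? pud_alloc(mm, p4d, addr) : NULL;
 *	pmd = pud ? pmd_alloc(mm, pud, addr) : NULL;
 *	if (!pmd)
 *		return -ENOMEM;
 */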

#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
extern bool ptlock_alloc(struct page *page);
extern void ptlock_free(struct page *page);

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return page->ptl;
}
#else /* ALLOC_SPLIT_PTLOCKS */
static inline void ptlock_cache_init(void)
{
}

static inline bool ptlock_alloc(struct page *page)
{
	return true;
}

static inline void ptlock_free(struct page *page)
{
}

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return &page->ptl;
}
#endif /* ALLOC_SPLIT_PTLOCKS */

static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_page(*pmd));
}

static inline bool ptlock_init(struct page *page)
{
	/*
	 * prep_new_page() initializes page->private (and therefore page->ptl)
	 * with 0. Make sure nobody took it in use in between.
	 *
	 * It can happen if arch tries to use slab for page table allocation:
	 * slab code uses page->slab_cache, which shares storage with page->ptl.
	 */
	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
	if (!ptlock_alloc(page))
		return false;
	spin_lock_init(ptlock_ptr(page));
	return true;
}

#else /* !USE_SPLIT_PTE_PTLOCKS */
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct page *page) { return true; }
static inline void ptlock_free(struct page *page) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */

static inline void pgtable_init(void)
{
	ptlock_cache_init();
	pgtable_cache_init();
}

static inline bool pgtable_pte_page_ctor(struct page *page)
{
	if (!ptlock_init(page))
		return false;
	__SetPageTable(page);
	inc_lruvec_page_state(page, NR_PAGETABLE);
	return true;
}

static inline void pgtable_pte_page_dtor(struct page *page)
{
	ptlock_free(page);
	__ClearPageTable(page);
	dec_lruvec_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)
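
/*
 * Illustrative only (hypothetical page-table walker): map and lock a PTE,
 * then release both with pte_unmap_unlock().  The caller must already
 * know the pmd is present and stable:
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	...examine or update *pte...
 *	pte_unmap_unlock(pte, ptl);
 */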
2277
2278#define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
2279
2280#define pte_alloc_map(mm, pmd, address) \
2281 (pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))
2282
2283#define pte_alloc_map_lock(mm, pmd, address, ptlp) \
2284 (pte_alloc(mm, pmd) ? \
2285 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
2286
2287#define pte_alloc_kernel(pmd, address) \
2288 ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
2289 NULL: pte_offset_kernel(pmd, address))
2290
2291#if USE_SPLIT_PMD_PTLOCKS
2292
2293static struct page *pmd_to_page(pmd_t *pmd)
2294{
2295 unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
2296 return virt_to_page((void *)((unsigned long) pmd & mask));
2297}
2298
2299static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
2300{
2301 return ptlock_ptr(pmd_to_page(pmd));
2302}
2303
2304static inline bool pmd_ptlock_init(struct page *page)
2305{
2306#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2307 page->pmd_huge_pte = NULL;
2308#endif
2309 return ptlock_init(page);
2310}
2311
2312static inline void pmd_ptlock_free(struct page *page)
2313{
2314#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2315 VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
2316#endif
2317 ptlock_free(page);
2318}
2319
2320#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)
2321
2322#else
2323
2324static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
2325{
2326 return &mm->page_table_lock;
2327}
2328
2329static inline bool pmd_ptlock_init(struct page *page) { return true; }
2330static inline void pmd_ptlock_free(struct page *page) {}
2331
2332#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
2333
2334#endif
2335
2336static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
2337{
2338 spinlock_t *ptl = pmd_lockptr(mm, pmd);
2339 spin_lock(ptl);
2340 return ptl;
2341}

static inline bool pgtable_pmd_page_ctor(struct page *page)
{
	if (!pmd_ptlock_init(page))
		return false;
	__SetPageTable(page);
	inc_lruvec_page_state(page, NR_PAGETABLE);
	return true;
}

static inline void pgtable_pmd_page_dtor(struct page *page)
{
	pmd_ptlock_free(page);
	__ClearPageTable(page);
	dec_lruvec_page_state(page, NR_PAGETABLE);
}

/*
 * No scalability reason to split PUD locks yet, but follow the same
 * pattern as the PMD locks to make it easier if we decide to.  The VM
 * should not be considered ready to switch to split PUD locks yet;
 * there may be places which need to be done with the PUD lock held.
 */
static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
{
	return &mm->page_table_lock;
}

static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
{
	spinlock_t *ptl = pud_lockptr(mm, pud);

	spin_lock(ptl);
	return ptl;
}

extern void __init pagecache_init(void);
extern void __init free_area_init_memoryless_node(int nid);
extern void free_initmem(void);

/*
 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
 * into the buddy system. The freed pages will be poisoned with pattern
 * "poison" if it's within range [0, UCHAR_MAX].
 * Return pages freed into the buddy system.
 */
extern unsigned long free_reserved_area(void *start, void *end,
					int poison, const char *s);

extern void adjust_managed_page_count(struct page *page, long count);
extern void mem_init_print_info(void);

extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);

/* Free the reserved page into the buddy system, so it gets managed. */
static inline void free_reserved_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	adjust_managed_page_count(page, 1);
}
#define free_highmem_page(page) free_reserved_page(page)

static inline void mark_page_reserved(struct page *page)
{
	SetPageReserved(page);
	adjust_managed_page_count(page, -1);
}
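
/*
 * Example (illustrative sketch; mm_example_release_reserved() is not a
 * kernel API): returning a range of boot-time reserved pages to the
 * buddy allocator one page at a time.
 */
static inline void mm_example_release_reserved(struct page *first,
					       unsigned long nr_pages)
{
	unsigned long i;

	for (i = 0; i < nr_pages; i++)
		free_reserved_page(first + i);
}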

/*
 * Default method to free all the __init memory into the buddy system.
 * The freed pages will be poisoned with pattern "poison" if it's within
 * range [0, UCHAR_MAX].
 * Return pages freed into the buddy system.
 */
static inline unsigned long free_initmem_default(int poison)
{
	extern char __init_begin[], __init_end[];

	return free_reserved_area(&__init_begin, &__init_end,
				  poison, "unused kernel image (initmem)");
}

static inline unsigned long get_num_physpages(void)
{
	int nid;
	unsigned long phys_pages = 0;

	for_each_online_node(nid)
		phys_pages += node_present_pages(nid);

	return phys_pages;
}

/*
 * Using memblock node mappings, an architecture may initialise its
 * zones, allocate the backing mem_map and account for memory holes in an
 * architecture independent manner.
 *
 * An architecture is expected to register the ranges of page frames
 * backed by physical memory with memblock_add[_node]() before calling
 * free_area_init() and passing in the PFN each zone ends at.
 */
void free_area_init(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
						unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
						unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);

#ifndef CONFIG_NUMA
static inline int early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
#endif

extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_range(unsigned long, int, unsigned long,
		unsigned long, unsigned long, enum meminit_context,
		struct vmem_altmap *, int migratetype);
extern void setup_per_zone_wmarks(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
extern void show_mem(unsigned int flags, nodemask_t *nodemask);
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
#ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
extern unsigned long arch_reserved_kernel_pages(void);
#endif

extern __printf(3, 4)
void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);

extern void setup_per_cpu_pageset(void);

/* page_alloc.c */
extern int min_free_kbytes;
extern int watermark_boost_factor;
extern int watermark_scale_factor;
extern bool arch_has_descending_max_zone_pfns(void);

/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);

/* interval_tree.c */
void vma_interval_tree_insert(struct vm_area_struct *node,
			      struct rb_root_cached *root);
void vma_interval_tree_insert_after(struct vm_area_struct *node,
				    struct vm_area_struct *prev,
				    struct rb_root_cached *root);
void vma_interval_tree_remove(struct vm_area_struct *node,
			      struct rb_root_cached *root);
struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
				unsigned long start, unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
				unsigned long start, unsigned long last);

#define vma_interval_tree_foreach(vma, root, start, last)		\
	for (vma = vma_interval_tree_iter_first(root, start, last);	\
	     vma; vma = vma_interval_tree_iter_next(vma, start, last))
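
/*
 * Example (illustrative sketch): walking every VMA that maps a given
 * page range of a file, with the mapping's i_mmap_rwsem held:
 *
 *	struct vm_area_struct *vma;
 *
 *	vma_interval_tree_foreach(vma, &mapping->i_mmap, first, last) {
 *		... unmap or write-protect the range within vma ...
 *	}
 */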

void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
				   struct rb_root_cached *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
				   struct rb_root_cached *root);
struct anon_vma_chain *
anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
				  unsigned long start, unsigned long last);
struct anon_vma_chain *anon_vma_interval_tree_iter_next(
	struct anon_vma_chain *node, unsigned long start, unsigned long last);
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
#endif

#define anon_vma_interval_tree_foreach(avc, root, start, last)		 \
	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))

/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
	struct vm_area_struct *expand);
static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
{
	return __vma_adjust(vma, start, end, pgoff, insert, NULL);
}
extern struct vm_area_struct *vma_merge(struct mm_struct *,
	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
	struct mempolicy *, struct vm_userfaultfd_ctx);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
	unsigned long addr, int new_below);
extern int split_vma(struct mm_struct *, struct vm_area_struct *,
	unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
	struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);

static inline int check_data_rlimit(unsigned long rlim,
				    unsigned long new,
				    unsigned long start,
				    unsigned long end_data,
				    unsigned long start_data)
{
	if (rlim < RLIM_INFINITY) {
		if (((new - start) + (end_data - start_data)) > rlim)
			return -ENOSPC;
	}

	return 0;
}
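
/*
 * Example (illustrative sketch, modelled on sys_brk()): vetting a new
 * heap end against RLIMIT_DATA before moving the break:
 *
 *	if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
 *			      mm->end_data, mm->start_data))
 *		goto out;
 */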

extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);

extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);
extern struct file *get_task_exe_file(struct task_struct *task);

extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);

extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
				   const struct vm_special_mapping *sm);
extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags,
				   const struct vm_special_mapping *spec);
/* This is an obsolete alternative to _install_special_mapping. */
extern int install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags, struct page **pages);

unsigned long randomize_stack_top(unsigned long stack_top);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long mmap_region(struct file *file, unsigned long addr,
	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
	struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	unsigned long pgoff, unsigned long *populate, struct list_head *uf);
extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
		       struct list_head *uf, bool downgrade);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
		     struct list_head *uf);
extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);

#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
			 int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
{
	/* Ignore errors */
	(void) __mm_populate(addr, len, 1);
}
#else
static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif

/* These take the mm semaphore themselves */
extern int __must_check vm_brk(unsigned long, unsigned long);
extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
					  unsigned long, unsigned long,
					  unsigned long, unsigned long);

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
};

extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);
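
/*
 * Example (illustrative sketch; mm_example_find_area() is not a kernel
 * API): a bottom-up arch_get_unmapped_area()-style search with no extra
 * alignment constraints.
 */
static inline unsigned long mm_example_find_area(struct mm_struct *mm,
						 unsigned long len)
{
	struct vm_unmapped_area_info info;

	info.flags = 0;			/* 0 means bottom-up search */
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = 0;
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}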

/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
				       loff_t lstart, loff_t lend);
extern void truncate_inode_pages_final(struct address_space *);

/* generic vm_area_ops exported for stackable file systems */
extern vm_fault_t filemap_fault(struct vm_fault *vmf);
extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
		pgoff_t start_pgoff, pgoff_t end_pgoff);
extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);

/* mm/page-writeback.c */
int __must_check write_one_page(struct page *page);
void task_dirty_inc(struct task_struct *tsk);

extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);

/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
extern int expand_downwards(struct vm_area_struct *vma,
		unsigned long address);
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
  #define expand_upwards(vma, address) (0)
#endif

/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
					     struct vm_area_struct **pprev);

/**
 * find_vma_intersection() - Look up the first VMA which intersects the interval
 * @mm: The process address space.
 * @start_addr: The inclusive start user address.
 * @end_addr: The exclusive end user address.
 *
 * Returns: The first VMA within the provided range, %NULL otherwise.  Assumes
 * start_addr < end_addr.
 */
static inline
struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
					     unsigned long start_addr,
					     unsigned long end_addr)
{
	struct vm_area_struct *vma = find_vma(mm, start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}

/**
 * vma_lookup() - Find a VMA at a specific address
 * @mm: The process address space.
 * @addr: The user address.
 *
 * Return: The vm_area_struct at the given address, %NULL otherwise.
 */
static inline
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (vma && addr < vma->vm_start)
		vma = NULL;

	return vma;
}
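
/*
 * Example (illustrative sketch; mm_example_addr_is_mapped() is not a
 * kernel API): unlike find_vma(), which may return a VMA that starts
 * above @addr, vma_lookup() only returns a VMA that actually contains
 * it, so a simple NULL check answers "is this address mapped?".
 */
static inline bool mm_example_addr_is_mapped(struct mm_struct *mm,
					     unsigned long addr)
{
	mmap_assert_locked(mm);		/* caller must hold mmap_lock */
	return vma_lookup(mm, addr) != NULL;
}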

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)
			vm_start = 0;
	}
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

/* Look up the first VMA which exactly match the interval vm_start ... vm_end */
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
				unsigned long vm_start, unsigned long vm_end)
{
	struct vm_area_struct *vma = find_vma(mm, vm_start);

	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
		vma = NULL;

	return vma;
}

static inline bool range_in_vma(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	return (vma && vma->vm_start <= start && end <= vma->vm_end);
}

#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
#else
static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(0);
}
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
#endif

void vma_set_file(struct vm_area_struct *vma, struct file *file);

#ifdef CONFIG_NUMA_BALANCING
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
#endif

struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			unsigned long pfn, unsigned long size, pgprot_t);
int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, unsigned long size, pgprot_t prot);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
			struct page **pages, unsigned long *num);
int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
				unsigned long num);
int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
				unsigned long num);
vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			pfn_t pfn);
vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
			pfn_t pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
		unsigned long addr, pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);

static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
				unsigned long addr, struct page *page)
{
	int err = vm_insert_page(vma, addr, page);

	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}
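
/*
 * Example (illustrative sketch; mm_example_fault() is not a kernel API):
 * a driver ->fault() handler backing the faulting address with a page
 * the driver already owns, letting vmf_insert_page() translate errors
 * into VM_FAULT_* codes.
 */
static inline vm_fault_t mm_example_fault(struct vm_fault *vmf,
					  struct page *page)
{
	return vmf_insert_page(vmf->vma, vmf->address, page);
}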

#ifndef io_remap_pfn_range
static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long addr, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
}
#endif

static inline vm_fault_t vmf_error(int err)
{
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags);

#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_TOUCH	0x02	/* mark page accessed */
#define FOLL_GET	0x04	/* do get_page on page */
#define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
#define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
#define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
				 * and return without waiting upon it */
#define FOLL_POPULATE	0x40	/* fault in page */
#define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
#define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
#define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
#define FOLL_TRIED	0x800	/* a retry, previous pass started an IO */
#define FOLL_MLOCK	0x1000	/* lock present pages */
#define FOLL_REMOTE	0x2000	/* we are working on non-current tsk/mm */
#define FOLL_COW	0x4000	/* internal GUP flag */
#define FOLL_ANON	0x8000	/* don't do file mappings */
#define FOLL_LONGTERM	0x10000	/* mapping lifetime is indefinite */
#define FOLL_SPLIT_PMD	0x20000	/* split huge pmd before returning */
#define FOLL_PIN	0x40000	/* pages must be released via unpin_user_page */
#define FOLL_FAST_ONLY	0x80000	/* gup_fast: prevent fall-back to slow gup */

/*
 * FOLL_PIN and FOLL_GET are mutually exclusive ways of tracking a page's
 * lifetime.  Pages obtained with FOLL_GET are released with put_page(),
 * while pages obtained with FOLL_PIN must be released with
 * unpin_user_page().  FOLL_PIN, usually combined with FOLL_LONGTERM, is
 * the right choice when the page contents will be accessed directly
 * (for example via DMA); see Documentation/core-api/pin_user_pages.rst.
 */

static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
{
	if (vm_fault & VM_FAULT_OOM)
		return -ENOMEM;
	if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
		return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
	if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
		return -EFAULT;
	return 0;
}
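
/*
 * Example (illustrative sketch; mm_example_fault_in() is not a kernel
 * API, and CONFIG_MMU is assumed): translating a handle_mm_fault()
 * result for a caller that expects an errno, as the GUP code does.
 */
static inline int mm_example_fault_in(struct vm_area_struct *vma,
				      unsigned long addr)
{
	vm_fault_t ret = handle_mm_fault(vma, addr, 0, NULL);

	return vm_fault_to_errno(ret, 0);
}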

typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
			       unsigned long size, pte_fn_t fn, void *data);
extern int apply_to_existing_page_range(struct mm_struct *mm,
				   unsigned long address, unsigned long size,
				   pte_fn_t fn, void *data);
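
/*
 * Example (illustrative sketch; mm_example_count_pte() is not a kernel
 * API): a pte_fn_t callback that counts populated entries.  A caller
 * would invoke it as
 *	apply_to_page_range(mm, start, size, mm_example_count_pte, &count);
 */
static inline int mm_example_count_pte(pte_t *pte, unsigned long addr,
				       void *data)
{
	unsigned long *count = data;

	if (!pte_none(*pte))
		(*count)++;
	return 0;
}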

extern void init_mem_debugging_and_hardening(void);
#ifdef CONFIG_PAGE_POISONING
extern void __kernel_poison_pages(struct page *page, int numpages);
extern void __kernel_unpoison_pages(struct page *page, int numpages);
extern bool _page_poisoning_enabled_early;
DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled);
static inline bool page_poisoning_enabled(void)
{
	return _page_poisoning_enabled_early;
}
/*
 * For use in fast paths after init_mem_debugging() has run, or when a
 * false negative result is not harmful when called too early.
 */
static inline bool page_poisoning_enabled_static(void)
{
	return static_branch_unlikely(&_page_poisoning_enabled);
}
static inline void kernel_poison_pages(struct page *page, int numpages)
{
	if (page_poisoning_enabled_static())
		__kernel_poison_pages(page, numpages);
}
static inline void kernel_unpoison_pages(struct page *page, int numpages)
{
	if (page_poisoning_enabled_static())
		__kernel_unpoison_pages(page, numpages);
}
#else
static inline bool page_poisoning_enabled(void) { return false; }
static inline bool page_poisoning_enabled_static(void) { return false; }
static inline void __kernel_poison_pages(struct page *page, int numpages) { }
static inline void kernel_poison_pages(struct page *page, int numpages) { }
static inline void kernel_unpoison_pages(struct page *page, int numpages) { }
#endif

DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
static inline bool want_init_on_alloc(gfp_t flags)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc))
		return true;
	return flags & __GFP_ZERO;
}

DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
static inline bool want_init_on_free(void)
{
	return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				   &init_on_free);
}
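
/*
 * Example (illustrative sketch; mm_example_maybe_zero() is not a kernel
 * API, and a lowmem page is assumed so page_address() is valid): how an
 * allocation path consults the init_on_alloc knob, in the style of
 * mm/page_alloc.c.
 */
static inline void mm_example_maybe_zero(struct page *page, gfp_t gfp)
{
	if (want_init_on_alloc(gfp))
		clear_page(page_address(page));
}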

extern bool _debug_pagealloc_enabled_early;
DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);

static inline bool debug_pagealloc_enabled(void)
{
	return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
		_debug_pagealloc_enabled_early;
}

/*
 * For use in fast paths after init_debug_pagealloc() has run, or when a
 * false negative result is not harmful when called too early.
 */
static inline bool debug_pagealloc_enabled_static(void)
{
	if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
		return false;

	return static_branch_unlikely(&_debug_pagealloc_enabled);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
/*
 * To support DEBUG_PAGEALLOC architecture must ensure that
 * __kernel_map_pages() never fails
 */
extern void __kernel_map_pages(struct page *page, int numpages, int enable);

static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
{
	if (debug_pagealloc_enabled_static())
		__kernel_map_pages(page, numpages, 1);
}

static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
{
	if (debug_pagealloc_enabled_static())
		__kernel_map_pages(page, numpages, 0);
}
#else
static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
#endif

#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
extern int in_gate_area_no_mm(unsigned long addr);
extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
#else
static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}
static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}
#endif	/* __HAVE_ARCH_GATE_AREA */

extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);

#ifdef CONFIG_SYSCTL
extern int sysctl_drop_caches;
int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
#endif

void drop_slab(void);
void drop_slab_node(int nid);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char * arch_vma_name(struct vm_area_struct *vma);
#ifdef CONFIG_MMU
void print_vma_addr(char *prefix, unsigned long rip);
#else
static inline void print_vma_addr(char *prefix, unsigned long rip)
{
}
#endif

int vmemmap_remap_free(unsigned long start, unsigned long end,
		       unsigned long reuse);
int vmemmap_remap_alloc(unsigned long start, unsigned long end,
			unsigned long reuse, gfp_t gfp_mask);

void *sparse_buffer_alloc(unsigned long size);
struct page * __populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
			    struct vmem_altmap *altmap);
void *vmemmap_alloc_block(unsigned long size, int node);
struct vmem_altmap;
void *vmemmap_alloc_block_buf(unsigned long size, int node,
			      struct vmem_altmap *altmap);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
			       int node, struct vmem_altmap *altmap);
int vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap);
#endif
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
				  unsigned long nr_pages);
enum mf_flags {
	MF_COUNT_INCREASED = 1 << 0,
	MF_ACTION_REQUIRED = 1 << 1,
	MF_MUST_KILL = 1 << 2,
	MF_SOFT_OFFLINE = 1 << 3,
};
extern int memory_failure(unsigned long pfn, int flags);
extern void memory_failure_queue(unsigned long pfn, int flags);
extern void memory_failure_queue_kick(int cpu);
extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p);
extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(unsigned long pfn, int flags);

/*
 * Error handlers for various types of pages.
 */
enum mf_result {
	MF_IGNORED,	/* Error: cannot be handled */
	MF_FAILED,	/* Error: handling failed */
	MF_DELAYED,	/* Will be handled later */
	MF_RECOVERED,	/* Successfully recovered */
};

enum mf_action_page_type {
	MF_MSG_KERNEL,
	MF_MSG_KERNEL_HIGH_ORDER,
	MF_MSG_SLAB,
	MF_MSG_DIFFERENT_COMPOUND,
	MF_MSG_POISONED_HUGE,
	MF_MSG_HUGE,
	MF_MSG_FREE_HUGE,
	MF_MSG_NON_PMD_HUGE,
	MF_MSG_UNMAP_FAILED,
	MF_MSG_DIRTY_SWAPCACHE,
	MF_MSG_CLEAN_SWAPCACHE,
	MF_MSG_DIRTY_MLOCKED_LRU,
	MF_MSG_CLEAN_MLOCKED_LRU,
	MF_MSG_DIRTY_UNEVICTABLE_LRU,
	MF_MSG_CLEAN_UNEVICTABLE_LRU,
	MF_MSG_DIRTY_LRU,
	MF_MSG_CLEAN_LRU,
	MF_MSG_TRUNCATED_LRU,
	MF_MSG_BUDDY,
	MF_MSG_BUDDY_2ND,
	MF_MSG_DAX,
	MF_MSG_UNSPLIT_THP,
	MF_MSG_UNKNOWN,
};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
			    unsigned long addr_hint,
			    unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src,
				unsigned long addr_hint,
				struct vm_area_struct *vma,
				unsigned int pages_per_huge_page);
extern long copy_huge_page_from_user(struct page *dst_page,
				const void __user *usr_src,
				unsigned int pages_per_huge_page,
				bool allow_pagefault);

/**
 * vma_is_special_huge - Are transhuge page-table entries considered special?
 * @vma: Pointer to the struct vm_area_struct to consider
 *
 * Whether transhuge page-table entries are considered "special" following
 * the definition in vm_normal_page().
 *
 * Return: true if transhuge page-table entries should be considered special,
 * false otherwise.
 */
static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
{
	return vma_is_dax(vma) || (vma->vm_file &&
				   (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);

static inline unsigned int debug_guardpage_minorder(void)
{
	return _debug_guardpage_minorder;
}

static inline bool debug_guardpage_enabled(void)
{
	return static_branch_unlikely(&_debug_guardpage_enabled);
}

static inline bool page_is_guard(struct page *page)
{
	if (!debug_guardpage_enabled())
		return false;

	return PageGuard(page);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool debug_guardpage_enabled(void) { return false; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif	/* CONFIG_DEBUG_PAGEALLOC */

#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else
static inline void setup_nr_node_ids(void) {}
#endif

extern int memcmp_pages(struct page *page1, struct page *page2);

static inline int pages_identical(struct page *page1, struct page *page2)
{
	return !memcmp_pages(page1, page2);
}

#ifdef CONFIG_MAPPING_DIRTY_HELPERS
unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
						pgoff_t first_index, pgoff_t nr,
						pgoff_t bitmap_pgoff,
						unsigned long *bitmap,
						pgoff_t *start,
						pgoff_t *end);

unsigned long wp_shared_mapping_range(struct address_space *mapping,
				      pgoff_t first_index, pgoff_t nr);
#endif

extern int sysctl_nr_trim_pages;

#ifdef CONFIG_PRINTK
void mem_dump_obj(void *object);
#else
static inline void mem_dump_obj(void *object) {}
#endif

/**
 * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it
 * @seals: the seals to check
 * @vma: the vma to operate on
 *
 * Check whether F_SEAL_FUTURE_WRITE is set; if so, do proper check/handling on
 * the vma flags.  Return 0 if check pass, or <0 for errors.
 */
static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
{
	if (seals & F_SEAL_FUTURE_WRITE) {
		/*
		 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
		 * "future write" seal active.
		 */
		if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
			return -EPERM;

		/*
		 * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
		 * MAP_SHARED and read-only, take care to not allow mprotect to
		 * revert protections on such mappings. Do this only for shared
		 * mappings. For private mappings, there is no need to mask
		 * VM_MAYWRITE as we still want them to be COW-writable.
		 */
		if (vma->vm_flags & VM_SHARED)
			vma->vm_flags &= ~(VM_MAYWRITE);
	}

	return 0;
}
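
/*
 * Example (illustrative sketch, modelled on shmem_mmap()): an ->mmap()
 * handler for a sealable mapping would do:
 *
 *	ret = seal_check_future_write(info->seals, vma);
 *	if (ret)
 *		return ret;
 */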

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */