#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
#include <linux/page_ref.h>
#include <linux/memremap.h>
#include <linux/overflow.h>
#include <linux/sizes.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;
struct bdi_writeback;

void init_mm_internals(void);

#ifndef CONFIG_NEED_MULTIPLE_NODES
extern unsigned long max_mapnr;

static inline void set_max_mapnr(unsigned long limit)
{
	max_mapnr = limit;
}
#else
static inline void set_max_mapnr(unsigned long limit) { }
#endif

extern atomic_long_t _totalram_pages;
static inline unsigned long totalram_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalram_pages);
}

static inline void totalram_pages_inc(void)
{
	atomic_long_inc(&_totalram_pages);
}

static inline void totalram_pages_dec(void)
{
	atomic_long_dec(&_totalram_pages);
}

static inline void totalram_pages_add(long count)
{
	atomic_long_add(count, &_totalram_pages);
}

static inline void totalram_pages_set(long val)
{
	atomic_long_set(&_totalram_pages, val);
}

extern void *high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
extern const int mmap_rnd_bits_min;
extern const int mmap_rnd_bits_max;
extern int mmap_rnd_bits __read_mostly;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
extern const int mmap_rnd_compat_bits_min;
extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __read_mostly;
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

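/*
 * Architectures that support memory tagging define untagged_addr() to strip
 * tag bits from userspace pointers; everywhere else it is a no-op.  The
 * remaining macros below are likewise default definitions that an
 * architecture may override in its own headers.
 */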
#ifndef untagged_addr
#define untagged_addr(addr) (addr)
#endif

#ifndef __pa_symbol
#define __pa_symbol(x)	__pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

#ifndef page_to_virt
#define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
#endif

#ifndef lm_alias
#define lm_alias(x)	__va(__pa_symbol(x))
#endif

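/*
 * mm_forbids_zeropage() decides whether the shared zero page may be mapped
 * into a process.  The default (0) allows it; architectures that cannot use
 * the zero page for a given mm override this macro.
 */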
#ifndef mm_forbids_zeropage
#define mm_forbids_zeropage(X)	(0)
#endif

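/*
 * On 64-bit, struct page is a small number of words (56, 64, 72 or 80 bytes)
 * and calling memset() for such sizes can be comparatively expensive, so
 * mm_zero_struct_page() clears it with straight-line stores instead.  The
 * switch below intentionally falls through from the larger cases down to the
 * 56-byte base case.
 */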
#if BITS_PER_LONG == 64

#define mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
static inline void __mm_zero_struct_page(struct page *page)
{
	unsigned long *_pp = (void *)page;

	BUILD_BUG_ON(sizeof(struct page) & 7);
	BUILD_BUG_ON(sizeof(struct page) < 56);
	BUILD_BUG_ON(sizeof(struct page) > 80);

	switch (sizeof(struct page)) {
	case 80:
		_pp[9] = 0;
	case 72:
		_pp[8] = 0;
	case 64:
		_pp[7] = 0;
	case 56:
		_pp[6] = 0;
		_pp[5] = 0;
		_pp[4] = 0;
		_pp[3] = 0;
		_pp[2] = 0;
		_pp[1] = 0;
		_pp[0] = 0;
	}
}
#else
#define mm_zero_struct_page(pp)	((void)memset((pp), 0, sizeof(struct page)))
#endif

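/*
 * Default maximum number of VMAs per mm (tunable via sysctl_max_map_count).
 * An ELF core dump emits one section per VMA and the section count is a
 * 16-bit field, so the default leaves a small margin below USHRT_MAX for the
 * extra informational sections the kernel adds to a core dump.
 */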
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

extern int sysctl_max_map_count;

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;

extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
				    size_t *, loff_t *);
extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
				     size_t *, loff_t *);

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)

#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))

struct vm_area_struct *vm_area_alloc(struct mm_struct *);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
void vm_area_free(struct vm_area_struct *);

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

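/*
 * vm_flags in vm_area_struct, see mm_types.h.
 * When changing, update also include/trace/events/mmflags.h.
 */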
#define VM_NONE		0x00000000

#define VM_READ		0x00000001
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

#define VM_MAYREAD	0x00000010
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100
#define VM_UFFD_MISSING	0x00000200
#define VM_PFNMAP	0x00000400
#define VM_DENYWRITE	0x00000800
#define VM_UFFD_WP	0x00001000

#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000

#define VM_SEQ_READ	0x00008000
#define VM_RAND_READ	0x00010000

#define VM_DONTCOPY	0x00020000
#define VM_DONTEXPAND	0x00040000
#define VM_LOCKONFAULT	0x00080000
#define VM_ACCOUNT	0x00100000
#define VM_NORESERVE	0x00200000
#define VM_HUGETLB	0x00400000
#define VM_SYNC		0x00800000
#define VM_ARCH_1	0x01000000
#define VM_WIPEONFORK	0x02000000
#define VM_DONTDUMP	0x04000000

#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY	0x08000000
#else
# define VM_SOFTDIRTY	0
#endif

#define VM_MIXEDMAP	0x10000000
#define VM_HUGEPAGE	0x20000000
#define VM_NOHUGEPAGE	0x40000000
#define VM_MERGEABLE	0x80000000

#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
#define VM_HIGH_ARCH_BIT_0	32
#define VM_HIGH_ARCH_BIT_1	33
#define VM_HIGH_ARCH_BIT_2	34
#define VM_HIGH_ARCH_BIT_3	35
#define VM_HIGH_ARCH_BIT_4	36
#define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
#define VM_HIGH_ARCH_4	BIT(VM_HIGH_ARCH_BIT_4)
#endif

#ifdef CONFIG_ARCH_HAS_PKEYS
# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0	VM_HIGH_ARCH_0
# define VM_PKEY_BIT1	VM_HIGH_ARCH_1
# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
#ifdef CONFIG_PPC
# define VM_PKEY_BIT4	VM_HIGH_ARCH_4
#else
# define VM_PKEY_BIT4	0
#endif
#endif

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_SPARC64)
# define VM_SPARC_ADI	VM_ARCH_1
# define VM_ARCH_CLEAR	VM_SPARC_ADI
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1
#endif

#if defined(CONFIG_X86_INTEL_MPX)
# define VM_MPX		VM_HIGH_ARCH_4
#else
# define VM_MPX		VM_NONE
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

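/* Bits set in the VMA until the stack is in its final location. */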
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#ifndef VM_STACK_DEFAULT_FLAGS
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK	VM_GROWSUP
#else
#define VM_STACK	VM_GROWSDOWN
#endif

#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)

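/* Special vmas that are non-mergable, non-mlock()able. */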
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE

#define VM_LOCKED_CLEAR_MASK	(~(VM_LOCKED | VM_LOCKONFAULT))

#ifndef VM_ARCH_CLEAR
# define VM_ARCH_CLEAR	VM_NONE
#endif
#define VM_FLAGS_CLEAR	(ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)

extern pgprot_t protection_map[16];

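/*
 * Fault flags passed to the fault handlers below, describing what kind of
 * access triggered the fault (write, instruction fetch, user mode, remote
 * access) and how the handler may behave (whether it may drop mmap_sem and
 * retry, whether it is killable, whether this attempt is already a retry).
 */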
#define FAULT_FLAG_WRITE	0x01
#define FAULT_FLAG_MKWRITE	0x02
#define FAULT_FLAG_ALLOW_RETRY	0x04
#define FAULT_FLAG_RETRY_NOWAIT	0x08
#define FAULT_FLAG_KILLABLE	0x10
#define FAULT_FLAG_TRIED	0x20
#define FAULT_FLAG_USER		0x40
#define FAULT_FLAG_REMOTE	0x80
#define FAULT_FLAG_INSTRUCTION	0x100

#define FAULT_FLAG_TRACE \
	{ FAULT_FLAG_WRITE,		"WRITE" }, \
	{ FAULT_FLAG_MKWRITE,		"MKWRITE" }, \
	{ FAULT_FLAG_ALLOW_RETRY,	"ALLOW_RETRY" }, \
	{ FAULT_FLAG_RETRY_NOWAIT,	"RETRY_NOWAIT" }, \
	{ FAULT_FLAG_KILLABLE,		"KILLABLE" }, \
	{ FAULT_FLAG_TRIED,		"TRIED" }, \
	{ FAULT_FLAG_USER,		"USER" }, \
	{ FAULT_FLAG_REMOTE,		"REMOTE" }, \
	{ FAULT_FLAG_INSTRUCTION,	"INSTRUCTION" }

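/*
 * vm_fault is filled in by the page fault handler and passed to the vma's
 * ->fault callback, which returns a bitmask of VM_FAULT_* codes describing
 * how the fault was handled.  The first few fields describe the faulting
 * access; the rest carry state shared with the core fault-handling code
 * (page table pointers, the pte lock, and pages involved in COW).
 */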
struct vm_fault {
	struct vm_area_struct *vma;
	unsigned int flags;
	gfp_t gfp_mask;
	pgoff_t pgoff;
	unsigned long address;
	pmd_t *pmd;
	pud_t *pud;

	pte_t orig_pte;

	struct page *cow_page;
	struct mem_cgroup *memcg;
	struct page *page;

	pte_t *pte;

	spinlock_t *ptl;

	pgtable_t prealloc_pte;
};

enum page_entry_size {
	PE_SIZE_PTE = 0,
	PE_SIZE_PMD,
	PE_SIZE_PUD,
};

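/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */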
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*split)(struct vm_area_struct * area, unsigned long addr);
	int (*mremap)(struct vm_area_struct * area);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf,
			enum page_entry_size pe_size);
	void (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct * area);

	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
#endif

	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};

static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	static const struct vm_operations_struct dummy_vm_ops = {};

	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	vma->vm_ops = &dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
}

static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
	vma->vm_ops = NULL;
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

#ifdef CONFIG_SHMEM
bool vma_is_shmem(struct vm_area_struct *vma);
#else
static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
#endif

int vma_is_stack_for_current(struct vm_area_struct *vma);

#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }

struct mmu_gather;
struct inode;

#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline int pmd_devmap(pmd_t pmd)
{
	return 0;
}
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

#include <linux/page-flags.h>
#include <linux/huge_mm.h>

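/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */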
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
	return page_ref_dec_and_test(page);
}

static inline int get_page_unless_zero(struct page *page)
{
	return page_ref_add_unless(page, 1, 0);
}

extern int page_is_ram(unsigned long pfn);

enum {
	REGION_INTERSECTS,
	REGION_DISJOINT,
	REGION_MIXED,
};

int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
		      unsigned long desc);

struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

static inline bool is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return false;
#endif
}

#ifndef is_ioremap_addr
#define is_ioremap_addr(x) is_vmalloc_addr(x)
#endif

#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
static inline void *kvmalloc(size_t size, gfp_t flags)
{
	return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
	return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
static inline void *kvzalloc(size_t size, gfp_t flags)
{
	return kvmalloc(size, flags | __GFP_ZERO);
}

static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return kvmalloc(bytes, flags);
}

static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
{
	return kvmalloc_array(n, size, flags | __GFP_ZERO);
}

extern void kvfree(const void *addr);
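
/*
 * The kvmalloc() family tries a kmalloc() first and falls back to vmalloc()
 * for allocations that are too large or when memory is fragmented; kvfree()
 * releases memory obtained from either path.  Illustrative sketch only (not
 * part of this header):
 *
 *	struct foo *table = kvcalloc(nr_entries, sizeof(*table), GFP_KERNEL);
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	kvfree(table);
 */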

static inline int compound_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(!PageCompound(page), page);
	page = compound_head(page);
	return atomic_read(compound_mapcount_ptr(page)) + 1;
}

static inline void page_mapcount_reset(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

int __page_mapcount(struct page *page);

static inline int page_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(PageSlab(page), page);

	if (unlikely(PageCompound(page)))
		return __page_mapcount(page);
	return atomic_read(&page->_mapcount) + 1;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int total_mapcount(struct page *page);
int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
#else
static inline int total_mapcount(struct page *page)
{
	return page_mapcount(page);
}
static inline int page_trans_huge_mapcount(struct page *page,
					   int *total_mapcount)
{
	int mapcount = page_mapcount(page);
	if (total_mapcount)
		*total_mapcount = mapcount;
	return mapcount;
}
#endif

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);

	return compound_head(page);
}

void __put_page(struct page *page);

void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);

typedef void compound_page_dtor(struct page *);

enum compound_dtor_id {
	NULL_COMPOUND_DTOR,
	COMPOUND_PAGE_DTOR,
#ifdef CONFIG_HUGETLB_PAGE
	HUGETLB_PAGE_DTOR,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	TRANSHUGE_PAGE_DTOR,
#endif
	NR_COMPOUND_DTORS,
};
extern compound_page_dtor * const compound_page_dtors[];

static inline void set_compound_page_dtor(struct page *page,
		enum compound_dtor_id compound_dtor)
{
	VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
	page[1].compound_dtor = compound_dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
	return compound_page_dtors[page[1].compound_dtor];
}

static inline unsigned int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return page[1].compound_order;
}

static inline void set_compound_order(struct page *page, unsigned int order)
{
	page[1].compound_order = order;
}

static inline unsigned long compound_nr(struct page *page)
{
	return 1UL << compound_order(page);
}

static inline unsigned long page_size(struct page *page)
{
	return PAGE_SIZE << compound_order(page);
}

static inline unsigned int page_shift(struct page *page)
{
	return PAGE_SHIFT + compound_order(page);
}

void free_compound_page(struct page *page);

#ifdef CONFIG_MMU

static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}

vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
		struct page *page);
vm_fault_t finish_fault(struct vm_fault *vmf);
vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#endif

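/*
 * page->flags layout:  the upper bits of page->flags encode (depending on
 * configuration) the page's zone, node, sparsemem section, last-cpupid and
 * KASAN tag.  The *_PGOFF/*_PGSHIFT/*_MASK definitions below describe where
 * each field lives; fields whose configured width is zero compile away.
 */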
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)
#define KASAN_TAG_PGOFF		(LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)

#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
#define KASAN_TAG_PGSHIFT	(KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))

#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
#define KASAN_TAG_MASK		((1UL << KASAN_TAG_WIDTH) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_zone_device_page(const struct page *page)
{
	return page_zonenum(page) == ZONE_DEVICE;
}
extern void memmap_init_zone_device(struct zone *, unsigned long,
				    unsigned long, struct dev_pagemap *);
#else
static inline bool is_zone_device_page(const struct page *page)
{
	return false;
}
#endif

#ifdef CONFIG_DEV_PAGEMAP_OPS
void __put_devmap_managed_page(struct page *page);
DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
static inline bool put_devmap_managed_page(struct page *page)
{
	if (!static_branch_unlikely(&devmap_managed_key))
		return false;
	if (!is_zone_device_page(page))
		return false;
	switch (page->pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
	case MEMORY_DEVICE_FS_DAX:
		__put_devmap_managed_page(page);
		return true;
	default:
		break;
	}
	return false;
}

#else
static inline bool put_devmap_managed_page(struct page *page)
{
	return false;
}
#endif

static inline bool is_device_private_page(const struct page *page)
{
	return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
		IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
		is_zone_device_page(page) &&
		page->pgmap->type == MEMORY_DEVICE_PRIVATE;
}

static inline bool is_pci_p2pdma_page(const struct page *page)
{
	return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
		IS_ENABLED(CONFIG_PCI_P2PDMA) &&
		is_zone_device_page(page) &&
		page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
}

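/*
 * 127: arbitrary random number, small enough to assemble well.  A refcount
 * this close to zero after an increment would indicate an overflow or
 * underflow bug, so get_page() below traps it with VM_BUG_ON_PAGE().
 */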
#define page_ref_zero_or_close_to_overflow(page) \
	((unsigned int) page_ref_count(page) + 127u <= 127u)

static inline void get_page(struct page *page)
{
	page = compound_head(page);

	VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
	page_ref_inc(page);
}

static inline __must_check bool try_get_page(struct page *page)
{
	page = compound_head(page);
	if (WARN_ON_ONCE(page_ref_count(page) <= 0))
		return false;
	page_ref_inc(page);
	return true;
}

static inline void put_page(struct page *page)
{
	page = compound_head(page);

	if (put_devmap_managed_page(page))
		return;

	if (put_page_testzero(page))
		__put_page(page);
}

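/*
 * put_user_page() - release a page pinned via get_user_pages*().
 *
 * Pages pinned by get_user_pages*() should be released with put_user_page()
 * or one of the put_user_pages*() variants below rather than put_page(), so
 * that pinned pages can eventually be tracked separately from ordinary page
 * references.  For now this simply wraps put_page().
 */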
static inline void put_user_page(struct page *page)
{
	put_page(page);
}

void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
			       bool make_dirty);

void put_user_pages(struct page **pages, unsigned long npages);

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif

static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	struct page *p = (struct page *)page;

	return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

static inline int cpupid_to_pid(int cpupid)
{
	return cpupid & LAST__PID_MASK;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}

static inline int cpupid_to_nid(int cpupid)
{
	return cpu_to_node(cpupid_to_cpu(cpupid));
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}

static inline bool cpupid_cpu_unset(int cpupid)
{
	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
}

static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
{
	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
}

#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}

static inline int page_cpupid_last(struct page *page)
{
	return page->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int page_cpupid_last(struct page *page)
{
	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}

extern int page_cpupid_xchg_last(struct page *page, int cpupid);

static inline void page_cpupid_reset_last(struct page *page)
{
	page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif
#else
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return page_to_nid(page);
}

static inline int page_cpupid_last(struct page *page)
{
	return page_to_nid(page);
}

static inline int cpupid_to_nid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_pid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return -1;
}

static inline int cpu_pid_to_cpupid(int nid, int pid)
{
	return -1;
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return 1;
}

static inline void page_cpupid_reset_last(struct page *page)
{
}

static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
	return false;
}
#endif

#ifdef CONFIG_KASAN_SW_TAGS
static inline u8 page_kasan_tag(const struct page *page)
{
	return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
}

static inline void page_kasan_tag_set(struct page *page, u8 tag)
{
	page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
	page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
}

static inline void page_kasan_tag_reset(struct page *page)
{
	page_kasan_tag_set(page, 0xff);
}
#else
static inline u8 page_kasan_tag(const struct page *page)
{
	return 0xff;
}

static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
static inline void page_kasan_tag_reset(struct page *page) { }
#endif

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

static inline pg_data_t *page_pgdat(const struct page *page)
{
	return NODE_DATA(page_to_nid(page));
}

#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#ifdef SECTION_IN_PAGE_FLAGS
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}

#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return page->mem_cgroup;
}
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return READ_ONCE(page->mem_cgroup);
}
#else
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return NULL;
}
#endif

#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(const struct page *page)
{
	return page_to_virt(page);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
static inline void *page_address(const struct page *page)
{
	return page->virtual;
}
static inline void set_page_address(struct page *page, void *address)
{
	page->virtual = address;
}
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

extern void *page_rmapping(struct page *page);
extern struct anon_vma *page_anon_vma(struct page *page);
extern struct address_space *page_mapping(struct page *page);

extern struct address_space *__page_file_mapping(struct page *);

static inline
struct address_space *page_file_mapping(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_mapping(page);

	return page->mapping;
}

extern pgoff_t __page_file_index(struct page *page);

static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_index(page);
	return page->index;
}

bool page_mapped(struct page *page);
struct address_space *page_mapping(struct page *page);
struct address_space *page_mapping_file(struct page *page);

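/*
 * Pages allocated from the pfmemalloc reserves are flagged by storing -1UL
 * in page->index (a value no real page cache index can take).  The helpers
 * below test, set and clear that marker.
 */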
static inline bool page_is_pfmemalloc(struct page *page)
{
	return page->index == -1UL;
}

static inline void set_page_pfmemalloc(struct page *page)
{
	page->index = -1UL;
}

static inline void clear_page_pfmemalloc(struct page *page)
{
	page->index = 0;
}

extern void pagefault_out_of_memory(void);

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

#define SHOW_MEM_FILTER_NODES		(0x0001u)

extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);

#ifdef CONFIG_MMU
extern bool can_do_mlock(void);
#else
static inline bool can_do_mlock(void) { return false; }
#endif
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

struct zap_details {
	struct address_space *check_mapping;
	pgoff_t first_index;
	pgoff_t last_index;
};

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			     pte_t pte);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd);

void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		  unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
		    unsigned long size);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long start, unsigned long end);

struct mmu_notifier_range;

void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma);
int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
		   struct mmu_notifier_range *range,
		   pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write);

extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);

#ifdef CONFIG_MMU
extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long address, unsigned int fault_flags,
			    bool *unlocked);
void unmap_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t nr, bool even_cows);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
#else
static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
		unsigned long address, unsigned int flags)
{
	BUG();
	return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct task_struct *tsk,
		struct mm_struct *mm, unsigned long address,
		unsigned int fault_flags, bool *unlocked)
{
	BUG();
	return -EFAULT;
}
static inline void unmap_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t nr, bool even_cows) { }
static inline void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows) { }
#endif

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
		void *buf, int len, unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, unsigned int gup_flags);
extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, unsigned int gup_flags);

long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long start, unsigned long nr_pages,
			    unsigned int gup_flags, struct page **pages,
			    struct vm_area_struct **vmas, int *locked);
long get_user_pages(unsigned long start, unsigned long nr_pages,
			    unsigned int gup_flags, struct page **pages,
			    struct vm_area_struct **vmas);
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages, int *locked);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
		    struct page **pages, unsigned int gup_flags);

int get_user_pages_fast(unsigned long start, int nr_pages,
			unsigned int gup_flags, struct page **pages);

int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim);

struct frame_vector {
	unsigned int nr_allocated;
	unsigned int nr_frames;
	bool got_ref;
	bool is_pfns;
	void *ptrs[0];
};

struct frame_vector *frame_vector_create(unsigned int nr_frames);
void frame_vector_destroy(struct frame_vector *vec);
int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
		     unsigned int gup_flags, struct frame_vector *vec);
void put_vaddr_frames(struct frame_vector *vec);
int frame_vector_to_pages(struct frame_vector *vec);
void frame_vector_to_pfns(struct frame_vector *vec);

static inline unsigned int frame_vector_count(struct frame_vector *vec)
{
	return vec->nr_frames;
}

static inline struct page **frame_vector_pages(struct frame_vector *vec)
{
	if (vec->is_pfns) {
		int err = frame_vector_to_pages(vec);

		if (err)
			return ERR_PTR(err);
	}
	return (struct page **)(vec->ptrs);
}

static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
{
	if (!vec->is_pfns)
		frame_vector_to_pfns(vec);
	return (unsigned long *)(vec->ptrs);
}

struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
			struct page **pages);
int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);

extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned int offset,
			      unsigned int length);

void __set_page_dirty(struct page *, struct address_space *, int warn);
int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_cleaned(struct page *page, struct address_space *mapping,
			  struct bdi_writeback *wb);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
void __cancel_dirty_page(struct page *page);
static inline void cancel_dirty_page(struct page *page)
{
	if (PageDirty(page))
		__cancel_dirty_page(page);
}
int clear_page_dirty_for_io(struct page *page);

int get_cmdline(struct task_struct *task, char *buffer, int buflen);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks);
extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end, pgprot_t newprot,
			      int dirty_accountable, int prot_numa);
extern int mprotect_fixup(struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages);

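/*
 * Per-mm RSS counters.  With SPLIT_RSS_COUNTING, per-task deltas are folded
 * into mm->rss_stat only periodically, so the global value can transiently
 * appear negative; get_mm_counter() clamps it to zero in that case.
 */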
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = atomic_long_read(&mm->rss_stat.count[member]);

#ifdef SPLIT_RSS_COUNTING
	if (val < 0)
		val = 0;
#endif
	return (unsigned long)val;
}

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	atomic_long_add(value, &mm->rss_stat.count[member]);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_inc(&mm->rss_stat.count[member]);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_dec(&mm->rss_stat.count[member]);
}

static inline int mm_counter_file(struct page *page)
{
	if (PageSwapBacked(page))
		return MM_SHMEMPAGES;
	return MM_FILEPAGES;
}

static inline int mm_counter(struct page *page)
{
	if (PageAnon(page))
		return MM_ANONPAGES;
	return mm_counter_file(page);
}

static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES) +
		get_mm_counter(mm, MM_SHMEMPAGES);
}

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if ((mm)->hiwater_rss < _rss)
		(mm)->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
{
	mm->hiwater_rss = get_mm_rss(mm);
}

static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

	if (*maxrss < hiwater_rss)
		*maxrss = hiwater_rss;
}

#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pte_devmap(pte_t pte)
{
	return 0;
}
#endif

int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			       spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
				    spinlock_t **ptl)
{
	pte_t *ptep;
	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
	return ptep;
}

#ifdef __PAGETABLE_P4D_FOLDED
static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
						unsigned long address)
{
	return 0;
}
#else
int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
						unsigned long address)
{
	return 0;
}
static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
static inline void mm_dec_nr_puds(struct mm_struct *mm) {}

#else
int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);

static inline void mm_inc_nr_puds(struct mm_struct *mm)
{
	if (mm_pud_folded(mm))
		return;
	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_puds(struct mm_struct *mm)
{
	if (mm_pud_folded(mm))
		return;
	atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
#endif

#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}

static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}

#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);

static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
	if (mm_pmd_folded(mm))
		return;
	atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
	if (mm_pmd_folded(mm))
		return;
	atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
#endif

#ifdef CONFIG_MMU
static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->pgtables_bytes, 0);
}

static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
	return atomic_long_read(&mm->pgtables_bytes);
}

static inline void mm_inc_nr_ptes(struct mm_struct *mm)
{
	atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_ptes(struct mm_struct *mm)
{
	atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}
#else

static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
	return 0;
}

static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
#endif

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
int __pte_alloc_kernel(pmd_t *pmd);

#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)

#ifndef __ARCH_HAS_5LEVEL_HACK
static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
		unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
		NULL : p4d_offset(pgd, address);
}

static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
		unsigned long address)
{
	return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
		NULL : pud_offset(p4d, address);
}
#endif

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
		NULL: pmd_offset(pud, address);
}
#endif

#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
extern bool ptlock_alloc(struct page *page);
extern void ptlock_free(struct page *page);

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return page->ptl;
}
#else
static inline void ptlock_cache_init(void)
{
}

static inline bool ptlock_alloc(struct page *page)
{
	return true;
}

static inline void ptlock_free(struct page *page)
{
}

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return &page->ptl;
}
#endif

static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_page(*pmd));
}

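/*
 * prep_new_page() initializes page->private (and therefore page->ptl) with
 * zero, so ptlock_init() below can sanity-check that nobody has taken the
 * lock field into use in the meantime before (re)initializing the spinlock.
 */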
static inline bool ptlock_init(struct page *page)
{
	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
	if (!ptlock_alloc(page))
		return false;
	spin_lock_init(ptlock_ptr(page));
	return true;
}

#else

static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct page *page) { return true; }
static inline void ptlock_free(struct page *page) {}
#endif

static inline void pgtable_init(void)
{
	ptlock_cache_init();
	pgtable_cache_init();
}

static inline bool pgtable_pte_page_ctor(struct page *page)
{
	if (!ptlock_init(page))
		return false;
	__SetPageTable(page);
	inc_zone_page_state(page, NR_PAGETABLE);
	return true;
}

static inline void pgtable_pte_page_dtor(struct page *page)
{
	ptlock_free(page);
	__ClearPageTable(page);
	dec_zone_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)
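
/*
 * Typical usage pattern for the two macros above (illustrative sketch only,
 * not part of this header): map and lock the PTE, inspect or modify it while
 * holding the lock, then unlock and unmap.
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *
 *	... examine or update *pte while holding the lock ...
 *
 *	pte_unmap_unlock(pte, ptl);
 */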

#define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))

#define pte_alloc_map(mm, pmd, address)			\
	(pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
	(pte_alloc(mm, pmd) ?			\
		 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)			\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
		NULL: pte_offset_kernel(pmd, address))

#if USE_SPLIT_PMD_PTLOCKS

static struct page *pmd_to_page(pmd_t *pmd)
{
	unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	return virt_to_page((void *)((unsigned long) pmd & mask));
}

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_to_page(pmd));
}

static inline bool pgtable_pmd_page_ctor(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	page->pmd_huge_pte = NULL;
#endif
	return ptlock_init(page);
}

static inline void pgtable_pmd_page_dtor(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
#endif
	ptlock_free(page);
}

#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)

#else

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}

static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
static inline void pgtable_pmd_page_dtor(struct page *page) {}

#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)

#endif

static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl = pmd_lockptr(mm, pmd);
	spin_lock(ptl);
	return ptl;
}

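/*
 * No scalability reason to split PUD locks yet, but follow the same pattern
 * as PMD locks, to make it easier if we decide to.  The VM should not be
 * considered ready to switch to split PUD locks yet; there may be places
 * which need to be converted from page_table_lock.
 */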
2048static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
2049{
2050 return &mm->page_table_lock;
2051}
2052
2053static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
2054{
2055 spinlock_t *ptl = pud_lockptr(mm, pud);
2056
2057 spin_lock(ptl);
2058 return ptl;
2059}
2060
2061extern void __init pagecache_init(void);
2062extern void free_area_init(unsigned long * zones_size);
2063extern void __init free_area_init_node(int nid, unsigned long * zones_size,
2064 unsigned long zone_start_pfn, unsigned long *zholes_size);
2065extern void free_initmem(void);
2066
2067
2068
2069
2070
2071
2072
2073extern unsigned long free_reserved_area(void *start, void *end,
2074 int poison, const char *s);
2075
2076#ifdef CONFIG_HIGHMEM
2077
2078
2079
2080
2081extern void free_highmem_page(struct page *page);
2082#endif
2083
2084extern void adjust_managed_page_count(struct page *page, long count);
2085extern void mem_init_print_info(const char *str);
2086
2087extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
2088
2089
2090static inline void __free_reserved_page(struct page *page)
2091{
2092 ClearPageReserved(page);
2093 init_page_count(page);
2094 __free_page(page);
2095}
2096
2097static inline void free_reserved_page(struct page *page)
2098{
2099 __free_reserved_page(page);
2100 adjust_managed_page_count(page, 1);
2101}
2102
2103static inline void mark_page_reserved(struct page *page)
2104{
2105 SetPageReserved(page);
2106 adjust_managed_page_count(page, -1);
2107}
2108
2109
2110
2111
2112
2113
2114
2115static inline unsigned long free_initmem_default(int poison)
2116{
2117 extern char __init_begin[], __init_end[];
2118
2119 return free_reserved_area(&__init_begin, &__init_end,
2120 poison, "unused kernel");
2121}
2122
2123static inline unsigned long get_num_physpages(void)
2124{
2125 int nid;
2126 unsigned long phys_pages = 0;
2127
2128 for_each_online_node(nid)
2129 phys_pages += node_present_pages(nid);
2130
2131 return phys_pages;
2132}
2133
2134#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161extern void free_area_init_nodes(unsigned long *max_zone_pfn);
2162unsigned long node_map_pfn_alignment(void);
2163unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
2164 unsigned long end_pfn);
2165extern unsigned long absent_pages_in_range(unsigned long start_pfn,
2166 unsigned long end_pfn);
2167extern void get_pfn_range_for_nid(unsigned int nid,
2168 unsigned long *start_pfn, unsigned long *end_pfn);
2169extern unsigned long find_min_pfn_with_active_regions(void);
2170extern void free_bootmem_with_active_regions(int nid,
2171 unsigned long max_low_pfn);
2172extern void sparse_memory_present_with_active_regions(int nid);
2173
2174#endif
2175
2176#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
2177 !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
2178static inline int __early_pfn_to_nid(unsigned long pfn,
2179 struct mminit_pfnnid_cache *state)
2180{
2181 return 0;
2182}
2183#else
2184
2185extern int __meminit early_pfn_to_nid(unsigned long pfn);
2186
2187extern int __meminit __early_pfn_to_nid(unsigned long pfn,
2188 struct mminit_pfnnid_cache *state);
2189#endif
2190
2191#if !defined(CONFIG_FLAT_NODE_MEM_MAP)
2192void zero_resv_unavail(void);
2193#else
2194static inline void zero_resv_unavail(void) {}
2195#endif
2196
2197extern void set_dma_reserve(unsigned long new_dma_reserve);
2198extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
2199 enum memmap_context, struct vmem_altmap *);
2200extern void setup_per_zone_wmarks(void);
2201extern int __meminit init_per_zone_wmark_min(void);
2202extern void mem_init(void);
2203extern void __init mmap_init(void);
2204extern void show_mem(unsigned int flags, nodemask_t *nodemask);
2205extern long si_mem_available(void);
2206extern void si_meminfo(struct sysinfo * val);
2207extern void si_meminfo_node(struct sysinfo *val, int nid);
2208#ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
2209extern unsigned long arch_reserved_kernel_pages(void);
2210#endif
2211
2212extern __printf(3, 4)
2213void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
2214
2215extern void setup_per_cpu_pageset(void);
2216
2217extern void zone_pcp_update(struct zone *zone);
2218extern void zone_pcp_reset(struct zone *zone);
2219
2220
2221extern int min_free_kbytes;
2222extern int watermark_boost_factor;
2223extern int watermark_scale_factor;
2224
2225
2226extern atomic_long_t mmap_pages_allocated;
2227extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
2228
2229
2230void vma_interval_tree_insert(struct vm_area_struct *node,
2231 struct rb_root_cached *root);
2232void vma_interval_tree_insert_after(struct vm_area_struct *node,
2233 struct vm_area_struct *prev,
2234 struct rb_root_cached *root);
2235void vma_interval_tree_remove(struct vm_area_struct *node,
2236 struct rb_root_cached *root);
2237struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
2238 unsigned long start, unsigned long last);
2239struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
2240 unsigned long start, unsigned long last);
2241
2242#define vma_interval_tree_foreach(vma, root, start, last) \
2243 for (vma = vma_interval_tree_iter_first(root, start, last); \
2244 vma; vma = vma_interval_tree_iter_next(vma, start, last))

void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
				   struct rb_root_cached *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
				   struct rb_root_cached *root);
struct anon_vma_chain *
anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
				  unsigned long start, unsigned long last);
struct anon_vma_chain *anon_vma_interval_tree_iter_next(
	struct anon_vma_chain *node, unsigned long start, unsigned long last);
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
#endif

#define anon_vma_interval_tree_foreach(avc, root, start, last)		  \
	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))

/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
	struct vm_area_struct *expand);
static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
{
	return __vma_adjust(vma, start, end, pgoff, insert, NULL);
}
extern struct vm_area_struct *vma_merge(struct mm_struct *,
	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
	struct mempolicy *, struct vm_userfaultfd_ctx);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
	unsigned long addr, int new_below);
extern int split_vma(struct mm_struct *, struct vm_area_struct *,
	unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
	struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);

static inline int check_data_rlimit(unsigned long rlim,
				    unsigned long new,
				    unsigned long start,
				    unsigned long end_data,
				    unsigned long start_data)
{
	if (rlim < RLIM_INFINITY) {
		if (((new - start) + (end_data - start_data)) > rlim)
			return -ENOSPC;
	}

	return 0;
}
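
/*
 * Illustrative sketch (hypothetical helper): how a brk()-style path would
 * validate a proposed new break against RLIMIT_DATA with check_data_rlimit().
 * @rlim is the caller-supplied limit, normally rlimit(RLIMIT_DATA).
 */
static inline int example_check_brk(struct mm_struct *mm, unsigned long rlim,
				    unsigned long newbrk)
{
	return check_data_rlimit(rlim, newbrk, mm->start_brk,
				 mm->end_data, mm->start_data);
}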

extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);

extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);
extern struct file *get_task_exe_file(struct task_struct *task);

extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);

extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
				   const struct vm_special_mapping *sm);
extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags,
				   const struct vm_special_mapping *spec);

extern int install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags, struct page **pages);

unsigned long randomize_stack_top(unsigned long stack_top);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long mmap_region(struct file *file, unsigned long addr,
	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
	struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
	struct list_head *uf);
extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
		       struct list_head *uf, bool downgrade);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
		     struct list_head *uf);

static inline unsigned long
do_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	unsigned long pgoff, unsigned long *populate,
	struct list_head *uf)
{
	return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf);
}

#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
			 int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
{
	/* Ignore errors */
	(void) __mm_populate(addr, len, 1);
}
#else
static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif
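
/*
 * Illustrative sketch (hypothetical helper, not part of this header): the
 * usual protocol around do_mmap_pgoff() and mm_populate().  do_mmap_pgoff()
 * reports via *populate how many bytes of the new mapping should be
 * prefaulted (MAP_POPULATE/MAP_LOCKED); the caller faults them in afterwards.
 * Locking is elided here -- real callers hold mm->mmap_sem for write around
 * do_mmap_pgoff() and call mm_populate() only after dropping it.
 */
static inline unsigned long example_map_and_populate(struct file *file,
		unsigned long addr, unsigned long len, unsigned long prot,
		unsigned long flags, unsigned long pgoff, struct list_head *uf)
{
	unsigned long populate = 0;
	unsigned long ret;

	ret = do_mmap_pgoff(file, addr, len, prot, flags, pgoff,
			    &populate, uf);
	if (!IS_ERR_VALUE(ret) && populate)
		mm_populate(ret, populate);

	return ret;
}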

/* These take the mm semaphore themselves */
extern int __must_check vm_brk(unsigned long, unsigned long);
extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
					  unsigned long, unsigned long,
					  unsigned long, unsigned long);
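
/*
 * Illustrative sketch (hypothetical helper): mapping a region on behalf of
 * the current task with vm_mmap() and tearing it down again with
 * vm_munmap().  @prot and @flags would normally be PROT_* and MAP_* values
 * from <linux/mman.h>, which this header does not pull in itself; a failed
 * mapping is reported as a negative errno encoded in the returned address.
 */
static inline int example_map_then_unmap(struct file *file, unsigned long len,
					 unsigned long prot, unsigned long flags)
{
	unsigned long addr = vm_mmap(file, 0, len, prot, flags, 0);

	if (IS_ERR_VALUE(addr))
		return (int)addr;

	return vm_munmap(addr, len);
}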

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
};

extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);

/*
 * Search for an unmapped address range.
 *
 * We are looking for a range that:
 * - does not intersect with any VMA;
 * - is contained within the [low_limit, high_limit) interval;
 * - is at least length bytes long;
 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask).
 */
static inline unsigned long
vm_unmapped_area(struct vm_unmapped_area_info *info)
{
	if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
		return unmapped_area_topdown(info);
	else
		return unmapped_area(info);
}
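
/*
 * Illustrative sketch of how an architecture's arch_get_unmapped_area()
 * typically drives vm_unmapped_area(): a bottom-up search with no extra
 * alignment constraint.  The bounds are placeholders -- real implementations
 * use the arch's TASK_SIZE/mmap_min_addr and honour MAP_FIXED, address hints
 * and topdown layouts.
 */
static inline unsigned long example_bottom_up_unmapped_area(struct mm_struct *mm,
							    unsigned long len)
{
	struct vm_unmapped_area_info info;

	info.flags = 0;			/* bottom-up search */
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = 0;
	info.align_offset = 0;

	return vm_unmapped_area(&info);
}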

/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
				       loff_t lstart, loff_t lend);
extern void truncate_inode_pages_final(struct address_space *);

/* generic vm_area_ops exported for stackable file systems */
extern vm_fault_t filemap_fault(struct vm_fault *vmf);
extern void filemap_map_pages(struct vm_fault *vmf,
		pgoff_t start_pgoff, pgoff_t end_pgoff);
extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);

/* mm/page-writeback.c */
int __must_check write_one_page(struct page *page);
void task_dirty_inc(struct task_struct *tsk);

/* readahead.c */
#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)

int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read);

void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra,
			       struct file *filp,
			       pgoff_t offset,
			       unsigned long size);

void page_cache_async_readahead(struct address_space *mapping,
				struct file_ra_state *ra,
				struct file *filp,
				struct page *pg,
				pgoff_t offset,
				unsigned long size);

extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);

/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
extern int expand_downwards(struct vm_area_struct *vma,
		unsigned long address);
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
  #define expand_upwards(vma, address) (0)
#endif

/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
					     struct vm_area_struct **pprev);

/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none.  Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma = find_vma(mm,start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)
			vm_start = 0;
	}
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
				unsigned long vm_start, unsigned long vm_end)
{
	struct vm_area_struct *vma = find_vma(mm, vm_start);

	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
		vma = NULL;

	return vma;
}

static inline bool range_in_vma(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	return (vma && vma->vm_start <= start && end <= vma->vm_end);
}
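
/*
 * Illustrative sketch (hypothetical helper): validating that the user range
 * [start, start + len) is covered by a single VMA -- the common pattern of
 * find_vma() followed by range_in_vma().  Assumes the caller holds mmap_sem
 * and that start + len does not wrap.
 */
static inline bool example_range_is_mapped(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long len)
{
	struct vm_area_struct *vma = find_vma(mm, start);

	return range_in_vma(vma, start, start + len);
}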

#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
#else
static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(0);
}
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
#endif

#ifdef CONFIG_NUMA_BALANCING
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
#endif

struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
				unsigned long num);
int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
				unsigned long num);
vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			pfn_t pfn);
vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
		unsigned long addr, pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);

static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
				unsigned long addr, struct page *page)
{
	int err = vm_insert_page(vma, addr, page);

	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static inline vm_fault_t vmf_error(int err)
{
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}
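
/*
 * Illustrative sketch (hypothetical ->fault handler): a driver that already
 * owns @page (for example, allocated at open time and kept in its private
 * state) maps it at the faulting address with vmf_insert_page(), which folds
 * the vm_insert_page() errno into a VM_FAULT_* code; a missing page is
 * reported through vmf_error().
 */
static inline vm_fault_t example_fault_insert_page(struct vm_fault *vmf,
						   struct page *page)
{
	if (!page)
		return vmf_error(-ENOMEM);

	return vmf_insert_page(vmf->vma, vmf->address, page);
}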

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags);

#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_TOUCH	0x02	/* mark page accessed */
#define FOLL_GET	0x04	/* do get_page on page */
#define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
#define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
#define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
				 * and return without waiting upon it */
#define FOLL_POPULATE	0x40	/* fault in page */
#define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
#define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
#define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
#define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
#define FOLL_TRIED	0x800	/* a retry, previous pass started an IO */
#define FOLL_MLOCK	0x1000	/* lock present pages */
#define FOLL_REMOTE	0x2000	/* we are working on non-current tsk/mm */
#define FOLL_COW	0x4000	/* internal GUP flag */
#define FOLL_ANON	0x8000	/* don't do file mappings */
#define FOLL_LONGTERM	0x10000	/* mapping lifetime is indefinite: see below */
#define FOLL_SPLIT_PMD	0x20000	/* split huge pmd before returning */

/*
 * NOTE on FOLL_LONGTERM:
 *
 * FOLL_LONGTERM indicates that the page will be held for an indefinite time
 * period, often under userspace control (RDMA, V4L2 and similar users).
 * This is in contrast to ordinary get_user_pages() usage, where the pin is
 * transient.
 *
 * Because such long-lived pins cannot be coordinated with filesystem
 * lifetime rules or with CMA, FOLL_LONGTERM callers must not pin
 * filesystem-DAX pages, and pages in CMA regions are migrated out before
 * the pin is taken.  FOLL_LONGTERM is also only honoured by the GUP
 * variants that can perform those checks; the others reject it.
 */

static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
{
	if (vm_fault & VM_FAULT_OOM)
		return -ENOMEM;
	if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
		return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
	if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
		return -EFAULT;
	return 0;
}
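
/*
 * Illustrative sketch (hypothetical, simplified from what a GUP-style caller
 * does): fault a page in with handle_mm_fault() and translate any VM_FAULT_*
 * error bits into an errno, letting FOLL_HWPOISON select -EHWPOISON via
 * vm_fault_to_errno().
 */
static inline int example_faultin_page(struct vm_area_struct *vma,
				       unsigned long address,
				       unsigned int foll_flags)
{
	unsigned int fault_flags = 0;
	vm_fault_t ret;

	if (foll_flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (foll_flags & FOLL_REMOTE)
		fault_flags |= FAULT_FLAG_REMOTE;

	ret = handle_mm_fault(vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR)
		return vm_fault_to_errno(ret, foll_flags);

	return 0;
}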

typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
			       unsigned long size, pte_fn_t fn, void *data);
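
/*
 * Illustrative sketch of the apply_to_page_range() callback contract: the
 * (hypothetical) pte_fn_t below is invoked once per PTE slot in the range
 * and here simply counts how many entries are present.  A caller would use
 * it as, e.g.:
 *
 *	apply_to_page_range(mm, start, size, example_count_present_pte, &nr);
 */
static inline int example_count_present_pte(pte_t *pte, unsigned long addr,
					    void *data)
{
	unsigned long *nr_present = data;

	if (pte_present(*pte))
		(*nr_present)++;

	return 0;
}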

#ifdef CONFIG_PAGE_POISONING
extern bool page_poisoning_enabled(void);
extern void kernel_poison_pages(struct page *page, int numpages, int enable);
#else
static inline bool page_poisoning_enabled(void) { return false; }
static inline void kernel_poison_pages(struct page *page, int numpages,
					int enable) { }
#endif

#ifdef CONFIG_INIT_ON_ALLOC_DEFAULT_ON
DECLARE_STATIC_KEY_TRUE(init_on_alloc);
#else
DECLARE_STATIC_KEY_FALSE(init_on_alloc);
#endif
static inline bool want_init_on_alloc(gfp_t flags)
{
	if (static_branch_unlikely(&init_on_alloc) &&
	    !page_poisoning_enabled())
		return true;
	return flags & __GFP_ZERO;
}

#ifdef CONFIG_INIT_ON_FREE_DEFAULT_ON
DECLARE_STATIC_KEY_TRUE(init_on_free);
#else
DECLARE_STATIC_KEY_FALSE(init_on_free);
#endif
static inline bool want_init_on_free(void)
{
	return static_branch_unlikely(&init_on_free) &&
	       !page_poisoning_enabled();
}

#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT
DECLARE_STATIC_KEY_TRUE(_debug_pagealloc_enabled);
#else
DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
#endif

static inline bool debug_pagealloc_enabled(void)
{
	if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
		return false;

	return static_branch_unlikely(&_debug_pagealloc_enabled);
}

#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
extern void __kernel_map_pages(struct page *page, int numpages, int enable);

static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
	__kernel_map_pages(page, numpages, enable);
}
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif
#else
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
#endif
#endif
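
/*
 * Illustrative sketch (simplified from what the page allocator's free path
 * does): when debug_pagealloc is enabled at runtime, pages being freed are
 * unmapped from the kernel direct map so that any use-after-free access
 * faults immediately.
 */
static inline void example_debug_unmap_on_free(struct page *page, int numpages)
{
	if (debug_pagealloc_enabled())
		kernel_map_pages(page, numpages, 0);
}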

#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
extern int in_gate_area_no_mm(unsigned long addr);
extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
#else
static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}
static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}
#endif

extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);

#ifdef CONFIG_SYSCTL
extern int sysctl_drop_caches;
int drop_caches_sysctl_handler(struct ctl_table *, int,
			       void __user *, size_t *, loff_t *);
#endif

void drop_slab(void);
void drop_slab_node(int nid);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char * arch_vma_name(struct vm_area_struct *vma);
#ifdef CONFIG_MMU
void print_vma_addr(char *prefix, unsigned long rip);
#else
static inline void print_vma_addr(char *prefix, unsigned long rip)
{
}
#endif

void *sparse_buffer_alloc(unsigned long size);
struct page * __populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
struct vmem_altmap;
void *vmemmap_alloc_block_buf(unsigned long size, int node);
void *altmap_alloc_block_buf(unsigned long size, struct vmem_altmap *altmap);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
			       int node);
int vmemmap_populate(unsigned long start, unsigned long end, int node,
		     struct vmem_altmap *altmap);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap);
#endif
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
				  unsigned long nr_pages);

enum mf_flags {
	MF_COUNT_INCREASED = 1 << 0,
	MF_ACTION_REQUIRED = 1 << 1,
	MF_MUST_KILL = 1 << 2,
	MF_SOFT_OFFLINE = 1 << 3,
};
extern int memory_failure(unsigned long pfn, int flags);
extern void memory_failure_queue(unsigned long pfn, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int get_hwpoison_page(struct page *page);
#define put_hwpoison_page(page)	put_page(page)
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(struct page *page, int flags);
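
/*
 * Illustrative sketch (hypothetical): how an architecture's machine-check or
 * synchronous external-abort handler might hand a corrupted pfn to the
 * memory-failure machinery -- deferred to process context when called from
 * an interrupt-like context, synchronous otherwise.
 */
static inline int example_report_uncorrected_error(unsigned long pfn, bool sync)
{
	if (!sync) {
		memory_failure_queue(pfn, MF_ACTION_REQUIRED);
		return 0;
	}

	return memory_failure(pfn, MF_ACTION_REQUIRED | MF_MUST_KILL);
}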

/*
 * Error handlers for various types of pages.
 */
enum mf_result {
	MF_IGNORED,	/* Error: cannot be handled */
	MF_FAILED,	/* Error: handling failed */
	MF_DELAYED,	/* Will be handled later */
	MF_RECOVERED,	/* Successfully recovered */
};

enum mf_action_page_type {
	MF_MSG_KERNEL,
	MF_MSG_KERNEL_HIGH_ORDER,
	MF_MSG_SLAB,
	MF_MSG_DIFFERENT_COMPOUND,
	MF_MSG_POISONED_HUGE,
	MF_MSG_HUGE,
	MF_MSG_FREE_HUGE,
	MF_MSG_NON_PMD_HUGE,
	MF_MSG_UNMAP_FAILED,
	MF_MSG_DIRTY_SWAPCACHE,
	MF_MSG_CLEAN_SWAPCACHE,
	MF_MSG_DIRTY_MLOCKED_LRU,
	MF_MSG_CLEAN_MLOCKED_LRU,
	MF_MSG_DIRTY_UNEVICTABLE_LRU,
	MF_MSG_CLEAN_UNEVICTABLE_LRU,
	MF_MSG_DIRTY_LRU,
	MF_MSG_CLEAN_LRU,
	MF_MSG_TRUNCATED_LRU,
	MF_MSG_BUDDY,
	MF_MSG_BUDDY_2ND,
	MF_MSG_DAX,
	MF_MSG_UNKNOWN,
};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
			    unsigned long addr_hint,
			    unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src,
				unsigned long addr_hint,
				struct vm_area_struct *vma,
				unsigned int pages_per_huge_page);
extern long copy_huge_page_from_user(struct page *dst_page,
				     const void __user *usr_src,
				     unsigned int pages_per_huge_page,
				     bool allow_pagefault);
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);

static inline unsigned int debug_guardpage_minorder(void)
{
	return _debug_guardpage_minorder;
}

static inline bool debug_guardpage_enabled(void)
{
	return static_branch_unlikely(&_debug_guardpage_enabled);
}

static inline bool page_is_guard(struct page *page)
{
	if (!debug_guardpage_enabled())
		return false;

	return PageGuard(page);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool debug_guardpage_enabled(void) { return false; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif

#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else
static inline void setup_nr_node_ids(void) {}
#endif

extern int memcmp_pages(struct page *page1, struct page *page2);

static inline int pages_identical(struct page *page1, struct page *page2)
{
	return !memcmp_pages(page1, page2);
}
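
/*
 * Illustrative sketch (hypothetical helper): memcmp_pages()/pages_identical()
 * is the check a KSM-style deduplicator performs before trying to merge two
 * candidate pages that hold the same contents.
 */
static inline bool example_pages_mergeable(struct page *page1, struct page *page2)
{
	return page1 != page2 && pages_identical(page1, page2);
}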

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */