/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
#include <linux/page_ref.h>
#include <linux/memremap.h>
#include <linux/overflow.h>
#include <linux/sizes.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;
struct bdi_writeback;

void init_mm_internals(void);

#ifndef CONFIG_NEED_MULTIPLE_NODES
extern unsigned long max_mapnr;

static inline void set_max_mapnr(unsigned long limit)
{
	max_mapnr = limit;
}
#else
static inline void set_max_mapnr(unsigned long limit) { }
#endif

extern atomic_long_t _totalram_pages;
static inline unsigned long totalram_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalram_pages);
}

static inline void totalram_pages_inc(void)
{
	atomic_long_inc(&_totalram_pages);
}

static inline void totalram_pages_dec(void)
{
	atomic_long_dec(&_totalram_pages);
}

static inline void totalram_pages_add(long count)
{
	atomic_long_add(count, &_totalram_pages);
}

static inline void totalram_pages_set(long val)
{
	atomic_long_set(&_totalram_pages, val);
}
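
/*
 * Illustrative sketch (not part of the original header): callers never
 * touch _totalram_pages directly; they go through the accessors above.
 * For example, a driver returning a reserved page to the buddy allocator
 * ends up bumping the counter via adjust_managed_page_count():
 *
 *	free_reserved_page(page);
 *	pr_info("now %lu pages of RAM\n", totalram_pages());
 *
 * The atomic_long_t keeps concurrent readers coherent without a lock.
 */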

extern void * high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
extern const int mmap_rnd_bits_min;
extern const int mmap_rnd_bits_max;
extern int mmap_rnd_bits __read_mostly;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
extern const int mmap_rnd_compat_bits_min;
extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __read_mostly;
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

/*
 * Architectures that support memory tagging (embedding tag bits in the
 * otherwise-unused high bits of pointers) override untagged_addr() to
 * strip the tag from an address before it is used as a plain virtual
 * address. It is a no-op everywhere else.
 */
#ifndef untagged_addr
#define untagged_addr(addr) (addr)
#endif

#ifndef __pa_symbol
#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

#ifndef page_to_virt
#define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
#endif

#ifndef lm_alias
#define lm_alias(x)	__va(__pa_symbol(x))
#endif

/*
 * mm_forbids_zeropage() lets an architecture veto establishing a shared
 * zero page mapping for a given mm on read faults (s390 needs this to
 * avoid multiplexing hardware bits under virtualization). The generic
 * fallback never forbids it.
 */
#ifndef mm_forbids_zeropage
#define mm_forbids_zeropage(X)	(0)
#endif

/*
 * On 64-bit kernels, struct page has only a handful of build-time-known
 * possible sizes (56 to 80 bytes, always 8-byte aligned), so zeroing one
 * can be open coded as a short run of 8-byte stores instead of calling
 * memset(). Architectures may provide their own mm_zero_struct_page().
 */
#if BITS_PER_LONG == 64
#define mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
static inline void __mm_zero_struct_page(struct page *page)
{
	unsigned long *_pp = (void *)page;

	/* Check that struct page is either 56, 64, 72, or 80 bytes */
	BUILD_BUG_ON(sizeof(struct page) & 7);
	BUILD_BUG_ON(sizeof(struct page) < 56);
	BUILD_BUG_ON(sizeof(struct page) > 80);

	switch (sizeof(struct page)) {
	case 80:
		_pp[9] = 0;	/* fallthrough */
	case 72:
		_pp[8] = 0;	/* fallthrough */
	case 64:
		_pp[7] = 0;	/* fallthrough */
	case 56:
		_pp[6] = 0;
		_pp[5] = 0;
		_pp[4] = 0;
		_pp[3] = 0;
		_pp[2] = 0;
		_pp[1] = 0;
		_pp[0] = 0;
	}
}
#else
#define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
#endif
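
/*
 * Why the unrolled switch above: because sizeof(struct page) is a
 * compile-time constant, the compiler collapses the switch to a fixed
 * sequence of 8-byte stores, avoiding a memset() call on a very hot
 * path. A hedged usage sketch (memmap initialization is the typical
 * caller; 'zone' and 'nid' here stand in for the caller's context):
 *
 *	struct page *page = pfn_to_page(pfn);
 *
 *	mm_zero_struct_page(page);		// cheap, constant-size zeroing
 *	set_page_links(page, zone, nid, pfn);	// then encode zone/node/section
 */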

/*
 * Default maximum number of active map areas; this limits the number of
 * VMAs per mm_struct. Users can override it via sysctl_max_map_count.
 * The margin leaves the ELF core dump code headroom below the hard
 * USHRT_MAX limit for its extra section counts.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

extern int sysctl_max_map_count;

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;

extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
				    size_t *, loff_t *);
extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
				    size_t *, loff_t *);

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)

#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

struct vm_area_struct *vm_area_alloc(struct mm_struct *);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
void vm_area_free(struct vm_area_struct *);

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 * When changing, update also include/trace/events/mmflags.h
 */
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
#define VM_UFFD_WP	0x00001000	/* wrprotected pages tracking */

#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_LOCKONFAULT	0x00080000	/* Lock the pages covered when they are faulted in */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_SYNC		0x00800000	/* Synchronous page faults */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_WIPEONFORK	0x02000000	/* Wipe VMA contents in child. */
#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
#else
# define VM_SOFTDIRTY	0
#endif

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
#define VM_HIGH_ARCH_BIT_0	32	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_1	33	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_2	34	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3	35	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_4	36	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
#define VM_HIGH_ARCH_4	BIT(VM_HIGH_ARCH_BIT_4)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */

#ifdef CONFIG_ARCH_HAS_PKEYS
# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0	VM_HIGH_ARCH_0	/* A protection key is a 4-bit value */
# define VM_PKEY_BIT1	VM_HIGH_ARCH_1	/* on x86 and 5-bit value on ppc64   */
# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
#ifdef CONFIG_PPC
# define VM_PKEY_BIT4	VM_HIGH_ARCH_4
#else
# define VM_PKEY_BIT4	0
#endif
#endif /* CONFIG_ARCH_HAS_PKEYS */

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_SPARC64)
# define VM_SPARC_ADI	VM_ARCH_1	/* Uses ADI tag for access control */
# define VM_ARCH_CLEAR	VM_SPARC_ADI
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif

#if defined(CONFIG_X86_INTEL_MPX)
/* MPX specific bounds table or bounds directory */
# define VM_MPX		VM_HIGH_ARCH_4
#else
# define VM_MPX		VM_NONE
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK	VM_GROWSUP
#else
#define VM_STACK	VM_GROWSDOWN
#endif

#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)

/*
 * Special vmas that are non-mergable, non-mlock()able.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

/* This mask defines which mm->def_flags a process can inherit its parent */
#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE

/* This mask is used to clear all the VMA flags used by mlock */
#define VM_LOCKED_CLEAR_MASK	(~(VM_LOCKED | VM_LOCKONFAULT))

/* Arch-specific flags to clear when updating VM flags on protection change */
#ifndef VM_ARCH_CLEAR
# define VM_ARCH_CLEAR	VM_NONE
#endif
#define VM_FLAGS_CLEAR	(ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];

#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
#define FAULT_FLAG_MKWRITE	0x02	/* Fault was mkwrite of existing pte */
#define FAULT_FLAG_ALLOW_RETRY	0x04	/* Retry fault if blocking */
#define FAULT_FLAG_RETRY_NOWAIT	0x08	/* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE	0x10	/* The fault task is in SIGKILL killable region */
#define FAULT_FLAG_TRIED	0x20	/* Second try */
#define FAULT_FLAG_USER		0x40	/* The fault originated in userspace */
#define FAULT_FLAG_REMOTE	0x80	/* faulting for non current tsk/mm */
#define FAULT_FLAG_INSTRUCTION	0x100	/* The fault was during an instruction fetch */

#define FAULT_FLAG_TRACE \
	{ FAULT_FLAG_WRITE,		"WRITE" }, \
	{ FAULT_FLAG_MKWRITE,		"MKWRITE" }, \
	{ FAULT_FLAG_ALLOW_RETRY,	"ALLOW_RETRY" }, \
	{ FAULT_FLAG_RETRY_NOWAIT,	"RETRY_NOWAIT" }, \
	{ FAULT_FLAG_KILLABLE,		"KILLABLE" }, \
	{ FAULT_FLAG_TRIED,		"TRIED" }, \
	{ FAULT_FLAG_USER,		"USER" }, \
	{ FAULT_FLAG_REMOTE,		"REMOTE" }, \
	{ FAULT_FLAG_INSTRUCTION,	"INSTRUCTION" }
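
/*
 * FAULT_FLAG_TRACE is a {value, name} table meant for decoding fault
 * flags in tracepoints. A hedged sketch of the intended use inside a
 * TP_printk() format string (trace-event plumbing omitted):
 *
 *	TP_printk("flags=%s",
 *		  __print_flags(vmf->flags, "|", FAULT_FLAG_TRACE))
 */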

/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * MM layer fills up gfp_mask for page allocations but fault handler might
 * alter it if its implementation requires a different allocation context.
 *
 * pgoff should be used in favour of virtual_address, if possible.
 */
struct vm_fault {
	struct vm_area_struct *vma;	/* Target VMA */
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	gfp_t gfp_mask;			/* gfp mask to be used for allocations */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	unsigned long address;		/* Faulting virtual address */
	pmd_t *pmd;			/* Pointer to pmd entry matching
					 * the 'address' */
	pud_t *pud;			/* Pointer to pud entry matching
					 * the 'address'
					 */
	pte_t orig_pte;			/* Value of PTE at the time of fault */

	struct page *cow_page;		/* Page handler may use for COW fault */
	struct mem_cgroup *memcg;	/* Cgroup cow_page belongs to */
	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
	/* These three entries are valid only while holding ptl lock */
	pte_t *pte;			/* Pointer to pte entry matching
					 * the 'address'. NULL if the page
					 * table hasn't been allocated.
					 */
	spinlock_t *ptl;		/* Page table lock.
					 * Protects pte page table if 'pte'
					 * is not NULL, otherwise pmd.
					 */
	pgtable_t prealloc_pte;		/* Pre-allocated pte page table.
					 * vm_ops->map_pages() calls
					 * alloc_set_pte() from atomic context.
					 * do_fault_around() pre-allocates
					 * page table to avoid allocation from
					 * atomic context.
					 */
};

/* page entry size for vm->huge_fault() */
enum page_entry_size {
	PE_SIZE_PTE = 0,
	PE_SIZE_PMD,
	PE_SIZE_PUD,
};

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*split)(struct vm_area_struct * area, unsigned long addr);
	int (*mremap)(struct vm_area_struct * area);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf,
			enum page_entry_size pe_size);
	void (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct * area);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs that can switch between memory and hardware
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
#endif
	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr.  This is useful if the default behavior
	 * (using pte_page()) would not find the correct page.
	 */
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};

static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	static const struct vm_operations_struct dummy_vm_ops = {};

	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	vma->vm_ops = &dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
}

static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
	vma->vm_ops = NULL;
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}
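
/*
 * Hedged usage sketch (not from the original header): a freshly
 * allocated VMA gets the dummy vm_ops from vma_init(); callers setting
 * up an anonymous mapping then clear vm_ops, which is exactly the
 * condition vma_is_anonymous() tests:
 *
 *	struct vm_area_struct *vma = vm_area_alloc(mm);
 *
 *	if (vma) {
 *		vma_set_anonymous(vma);		// vma->vm_ops = NULL
 *		VM_BUG_ON(!vma_is_anonymous(vma));
 *	}
 */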

#ifdef CONFIG_SHMEM
/*
 * The vma_is_shmem is not inline because it is used only by slow
 * paths in userfault.
 */
bool vma_is_shmem(struct vm_area_struct *vma);
#else
static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
#endif

int vma_is_stack_for_current(struct vm_area_struct *vma);

/* flush_tlb_range() takes a vma, not a mm, and can care about flags */
#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }

struct mmu_gather;
struct inode;

#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline int pmd_devmap(pmd_t pmd)
{
	return 0;
}
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

/*
 * FIXME: take this include out, include page-flags.h in
 * files which need it (119 of them)
 */
#include <linux/page-flags.h>
#include <linux/huge_mm.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
	return page_ref_dec_and_test(page);
}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 * This can be called when MMU is off so it must not access
 * any of the virtual mappings.
 */
static inline int get_page_unless_zero(struct page *page)
{
	return page_ref_add_unless(page, 1, 0);
}
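
/*
 * Hedged sketch of the speculative-reference pattern these two helpers
 * enable (page-cache style lookups): take a reference only if the page
 * is still live, then revalidate; 'page_changed_under_us' is a
 * hypothetical placeholder for the caller's recheck, not a real helper:
 *
 *	if (!get_page_unless_zero(page))
 *		goto repeat;			// page was being freed
 *	if (page_changed_under_us(page)) {
 *		put_page(page);
 *		goto repeat;
 *	}
 */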

extern int page_is_ram(unsigned long pfn);

enum {
	REGION_INTERSECTS,
	REGION_DISJOINT,
	REGION_MIXED,
};

int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
		      unsigned long desc);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
static inline bool is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return false;
#endif
}

#ifndef is_ioremap_addr
#define is_ioremap_addr(x) is_vmalloc_addr(x)
#endif

#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
static inline void *kvmalloc(size_t size, gfp_t flags)
{
	return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
	return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
static inline void *kvzalloc(size_t size, gfp_t flags)
{
	return kvmalloc(size, flags | __GFP_ZERO);
}

static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return kvmalloc(bytes, flags);
}

static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
{
	return kvmalloc_array(n, size, flags | __GFP_ZERO);
}
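
/*
 * Hedged usage sketch: kvmalloc_node() tries kmalloc() first and falls
 * back to vmalloc() for allocations too large or fragmented for the slab
 * path, so the result must be freed with kvfree(), which handles either
 * case. 'struct foo' and 'nr' below are placeholders, not names from
 * this header:
 *
 *	struct foo *tbl = kvcalloc(nr, sizeof(*tbl), GFP_KERNEL);
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	kvfree(tbl);	// never plain kfree()/vfree()
 */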

extern void kvfree(const void *addr);

static inline atomic_t *compound_mapcount_ptr(struct page *page)
{
	return &page[1].compound_mapcount;
}

static inline int compound_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(!PageCompound(page), page);
	page = compound_head(page);
	return atomic_read(compound_mapcount_ptr(page)) + 1;
}

/*
 * The atomic page->_mapcount, starts from -1: so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */
static inline void page_mapcount_reset(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

int __page_mapcount(struct page *page);

static inline int page_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(PageSlab(page), page);

	if (unlikely(PageCompound(page)))
		return __page_mapcount(page);
	return atomic_read(&page->_mapcount) + 1;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int total_mapcount(struct page *page);
int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
#else
static inline int total_mapcount(struct page *page)
{
	return page_mapcount(page);
}
static inline int page_trans_huge_mapcount(struct page *page,
					   int *total_mapcount)
{
	int mapcount = page_mapcount(page);
	if (total_mapcount)
		*total_mapcount = mapcount;
	return mapcount;
}
#endif

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);

	return compound_head(page);
}

void __put_page(struct page *page);

void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);

/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a compound page.
 */
typedef void compound_page_dtor(struct page *);

/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
enum compound_dtor_id {
	NULL_COMPOUND_DTOR,
	COMPOUND_PAGE_DTOR,
#ifdef CONFIG_HUGETLB_PAGE
	HUGETLB_PAGE_DTOR,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	TRANSHUGE_PAGE_DTOR,
#endif
	NR_COMPOUND_DTORS,
};
extern compound_page_dtor * const compound_page_dtors[];

static inline void set_compound_page_dtor(struct page *page,
		enum compound_dtor_id compound_dtor)
{
	VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
	page[1].compound_dtor = compound_dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
	return compound_page_dtors[page[1].compound_dtor];
}

static inline unsigned int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return page[1].compound_order;
}

static inline void set_compound_order(struct page *page, unsigned int order)
{
	page[1].compound_order = order;
}

void free_compound_page(struct page *page);
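
/*
 * Compound-page metadata (destructor id and order) lives in the first
 * tail page, page[1], as the accessors above show. A hedged sketch of
 * how an allocator wires a new compound page together and how the
 * destructor is eventually invoked on the last reference drop:
 *
 *	set_compound_order(page, order);
 *	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
 *	...
 *	(*get_compound_page_dtor(page))(page);	// on the final put_page()
 */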

#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case, do always want
 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}

vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
		struct page *page);
vm_fault_t finish_fault(struct vm_fault *vmf);
vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#endif

/*
 * Multiple processors may want to use the same page, so the usage count
 * and the mapping live in struct page itself, while several independent
 * fields are multiplexed into the remaining bits of page->flags.
 *
 * The zone, node and (for sparsemem without vmemmap) section that a page
 * belongs to are encoded at the top end of page->flags, laid out from
 * the most significant bits downwards:
 *
 *	| [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | [KASAN_TAG] | ... FLAGS |
 *
 * A width of zero for a field means it is not stored in page->flags and
 * must be obtained some other way (e.g. a per-section lookup when
 * NODE_NOT_IN_PAGE_FLAGS is defined). The zone field is never updated
 * after free_area_init_core() sets it, so it can be read without locks.
 */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)
#define KASAN_TAG_PGOFF		(LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
#define KASAN_TAG_PGSHIFT	(KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF) ? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF) ? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
#define KASAN_TAG_MASK		((1UL << KASAN_TAG_WIDTH) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_zone_device_page(const struct page *page)
{
	return page_zonenum(page) == ZONE_DEVICE;
}
extern void memmap_init_zone_device(struct zone *, unsigned long,
				    unsigned long, struct dev_pagemap *);
#else
static inline bool is_zone_device_page(const struct page *page)
{
	return false;
}
#endif

#ifdef CONFIG_DEV_PAGEMAP_OPS
void __put_devmap_managed_page(struct page *page);
DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
static inline bool put_devmap_managed_page(struct page *page)
{
	if (!static_branch_unlikely(&devmap_managed_key))
		return false;
	if (!is_zone_device_page(page))
		return false;
	switch (page->pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
	case MEMORY_DEVICE_FS_DAX:
		__put_devmap_managed_page(page);
		return true;
	default:
		break;
	}
	return false;
}

#else /* CONFIG_DEV_PAGEMAP_OPS */
static inline bool put_devmap_managed_page(struct page *page)
{
	return false;
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */

static inline bool is_device_private_page(const struct page *page)
{
	return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
		IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
		is_zone_device_page(page) &&
		page->pgmap->type == MEMORY_DEVICE_PRIVATE;
}

static inline bool is_pci_p2pdma_page(const struct page *page)
{
	return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
		IS_ENABLED(CONFIG_PCI_P2PDMA) &&
		is_zone_device_page(page) &&
		page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
}

/* 127: arbitrary random number, small enough to assemble well */
#define page_ref_zero_or_close_to_overflow(page) \
	((unsigned int) page_ref_count(page) + 127u <= 127u)

static inline void get_page(struct page *page)
{
	page = compound_head(page);
	/*
	 * Getting a normal page or the head of a compound page
	 * requires to already have an elevated page->_refcount.
	 */
	VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
	page_ref_inc(page);
}

static inline __must_check bool try_get_page(struct page *page)
{
	page = compound_head(page);
	if (WARN_ON_ONCE(page_ref_count(page) <= 0))
		return false;
	page_ref_inc(page);
	return true;
}

static inline void put_page(struct page *page)
{
	page = compound_head(page);

	/*
	 * For devmap managed pages we need to catch refcount transition from
	 * 2 to 1, when refcount reach one it means the page is free and we
	 * need to inform the device driver through callback. See
	 * include/linux/memremap.h and HMM for details.
	 */
	if (put_devmap_managed_page(page))
		return;

	if (put_page_testzero(page))
		__put_page(page);
}
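
/*
 * Hedged pairing sketch: every get_page() (or successful try_get_page()
 * / get_page_unless_zero()) must be balanced by exactly one put_page().
 * The final put either frees the page via __put_page() or, for
 * device-managed memory, is diverted to put_devmap_managed_page():
 *
 *	get_page(page);
 *	...use the page...
 *	put_page(page);
 */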

/**
 * put_user_page() - release a gup-pinned page
 * @page:            pointer to page to be released
 *
 * Pages that were pinned via get_user_pages*() must be released via
 * either put_user_page(), or one of the put_user_pages*() routines
 * below.  This is so that eventually such pages can be separately
 * tracked and uniquely handled.
 *
 * put_user_page() and put_page() are not interchangeable, despite this
 * early implementation that makes them look the same.
 */
static inline void put_user_page(struct page *page)
{
	put_page(page);
}

void put_user_pages_dirty(struct page **pages, unsigned long npages);
void put_user_pages_dirty_lock(struct page **pages, unsigned long npages);
void put_user_pages(struct page **pages, unsigned long npages);
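
/*
 * Hedged sketch of get_user_pages()/put_user_pages*() pairing for pages
 * pinned from userspace (the *_dirty variants also mark them dirty on
 * release, which matters after device DMA into the pages):
 *
 *	long ret = get_user_pages(start, nr, FOLL_WRITE, pages, NULL);
 *
 *	if (ret > 0) {
 *		...DMA into the pages...
 *		put_user_pages_dirty_lock(pages, ret);
 *	}
 */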

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif

/*
 * The identification function is mainly used by the buddy allocator for
 * determining if two pages could be buddies. We are not really identifying
 * the zone since we could be using the section number id if we do not have
 * node id available in page flags.
 * We only guarantee that it will return the same value for two combinable
 * pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	struct page *p = (struct page *)page;

	return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

static inline int cpupid_to_pid(int cpupid)
{
	return cpupid & LAST__PID_MASK;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}

static inline int cpupid_to_nid(int cpupid)
{
	return cpu_to_node(cpupid_to_cpu(cpupid));
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}

static inline bool cpupid_cpu_unset(int cpupid)
{
	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
}

static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
{
	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
}

#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}

static inline int page_cpupid_last(struct page *page)
{
	return page->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int page_cpupid_last(struct page *page)
{
	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}

extern int page_cpupid_xchg_last(struct page *page, int cpupid);

static inline void page_cpupid_reset_last(struct page *page)
{
	page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif
#else
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return page_to_nid(page);
}

static inline int page_cpupid_last(struct page *page)
{
	return page_to_nid(page);
}

static inline int cpupid_to_nid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_pid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return -1;
}

static inline int cpu_pid_to_cpupid(int nid, int pid)
{
	return -1;
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return 1;
}

static inline void page_cpupid_reset_last(struct page *page)
{
}

static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
	return false;
}
#endif

#ifdef CONFIG_KASAN_SW_TAGS
static inline u8 page_kasan_tag(const struct page *page)
{
	return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
}

static inline void page_kasan_tag_set(struct page *page, u8 tag)
{
	page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
	page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
}

static inline void page_kasan_tag_reset(struct page *page)
{
	page_kasan_tag_set(page, 0xff);
}
#else
static inline u8 page_kasan_tag(const struct page *page)
{
	return 0xff;
}

static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
static inline void page_kasan_tag_reset(struct page *page) { }
#endif

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

static inline pg_data_t *page_pgdat(const struct page *page)
{
	return NODE_DATA(page_to_nid(page));
}

#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#ifdef SECTION_IN_PAGE_FLAGS
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}

#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return page->mem_cgroup;
}
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return READ_ONCE(page->mem_cgroup);
}
#else
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return NULL;
}
#endif

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(const struct page *page)
{
	return page_to_virt(page);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
static inline void *page_address(const struct page *page)
{
	return page->virtual;
}
static inline void set_page_address(struct page *page, void *address)
{
	page->virtual = address;
}
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

extern void *page_rmapping(struct page *page);
extern struct anon_vma *page_anon_vma(struct page *page);
extern struct address_space *page_mapping(struct page *page);

extern struct address_space *__page_file_mapping(struct page *);

static inline
struct address_space *page_file_mapping(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_mapping(page);

	return page->mapping;
}

extern pgoff_t __page_file_index(struct page *page);

/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use swp_offset(->private)
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_index(page);
	return page->index;
}

bool page_mapped(struct page *page);
struct address_space *page_mapping(struct page *page);
struct address_space *page_mapping_file(struct page *page);

/*
 * Return true only if the page has been allocated with
 * ALLOC_NO_WATERMARKS and the low watermark was not
 * met implying that the system is under some pressure.
 */
static inline bool page_is_pfmemalloc(struct page *page)
{
	/*
	 * Page index cannot be this large so this must be
	 * a pfmemalloc page.
	 */
	return page->index == -1UL;
}

/*
 * Only to be called by the page allocator on a freshly allocated
 * page.
 */
static inline void set_page_pfmemalloc(struct page *page)
{
	page->index = -1UL;
}

static inline void clear_page_pfmemalloc(struct page *page)
{
	page->index = 0;
}

/*
 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
 */
extern void pagefault_out_of_memory(void);

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

/*
 * Flags passed to show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);

extern bool can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct address_space *check_mapping;	/* Check page->mapping if set */
	pgoff_t	first_index;			/* Lowest page->index to unmap */
	pgoff_t last_index;			/* Highest page->index to unmap */
};

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			     pte_t pte);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd);

void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		  unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
		    unsigned long size);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long start, unsigned long end);

/**
 * mm_walk - callbacks for walk_page_range
 * @pud_entry:		if set, called for each non-empty PUD (2nd-level) entry
 * @pmd_entry:		if set, called for each non-empty PMD (3rd-level) entry
 *			this handler is required to be able to handle
 *			pmd_trans_huge() pmds.  They may simply choose to
 *			split_huge_page() instead of handling it explicitly.
 * @pte_entry:		if set, called for each non-empty PTE (4th-level) entry
 * @pte_hole:		if set, called for each hole at all levels
 * @hugetlb_entry:	if set, called for each hugetlb entry
 * @test_walk:		caller specific callback function to determine whether
 *			we walk over the current vma or not. Returning 0 means
 *			"do page table walk over the current vma", a negative
 *			value means "abort current page table walk right now"
 *			and 1 means "skip the current vma"
 * @mm:			mm_struct representing the target process of page table walk
 * @vma:		vma currently walked (NULL if walking outside vmas)
 * @private:		private data for callbacks' usage
 *
 * (see the comment on walk_page_range() for more details)
 */
struct mm_walk {
	int (*pud_entry)(pud_t *pud, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_entry)(pte_t *pte, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_hole)(unsigned long addr, unsigned long next,
			struct mm_walk *walk);
	int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
			     unsigned long addr, unsigned long next,
			     struct mm_walk *walk);
	int (*test_walk)(unsigned long addr, unsigned long next,
			struct mm_walk *walk);
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	void *private;
};

struct mmu_notifier_range;

int walk_page_range(unsigned long addr, unsigned long end,
		struct mm_walk *walk);
int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma);
int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
		   struct mmu_notifier_range *range,
		   pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write);

extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);

#ifdef CONFIG_MMU
extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long address, unsigned int fault_flags,
			    bool *unlocked);
void unmap_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t nr, bool even_cows);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
#else
static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
		unsigned long address, unsigned int flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct task_struct *tsk,
		struct mm_struct *mm, unsigned long address,
		unsigned int fault_flags, bool *unlocked)
{
	/* should never happen if there's no MMU */
	BUG();
	return -EFAULT;
}
static inline void unmap_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t nr, bool even_cows) { }
static inline void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows) { }
#endif

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
		void *buf, int len, unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, unsigned int gup_flags);
extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, unsigned int gup_flags);

long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long start, unsigned long nr_pages,
			    unsigned int gup_flags, struct page **pages,
			    struct vm_area_struct **vmas, int *locked);
long get_user_pages(unsigned long start, unsigned long nr_pages,
			    unsigned int gup_flags, struct page **pages,
			    struct vm_area_struct **vmas);
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages, int *locked);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
		    struct page **pages, unsigned int gup_flags);

int get_user_pages_fast(unsigned long start, int nr_pages,
			unsigned int gup_flags, struct page **pages);

int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim);

/* Container for pinned pfns / pages */
struct frame_vector {
	unsigned int nr_allocated;	/* Number of frames we have space for */
	unsigned int nr_frames;	/* Number of frames stored in ptrs array */
	bool got_ref;		/* Did we pin pages by getting page ref? */
	bool is_pfns;		/* Does array contain pages or pfns? */
	void *ptrs[0];		/* Array of pinned pfns / pages. Use
				 * pfns_vector_pages() or pfns_vector_pfns()
				 * for access */
};

struct frame_vector *frame_vector_create(unsigned int nr_frames);
void frame_vector_destroy(struct frame_vector *vec);
int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
		     unsigned int gup_flags, struct frame_vector *vec);
void put_vaddr_frames(struct frame_vector *vec);
int frame_vector_to_pages(struct frame_vector *vec);
void frame_vector_to_pfns(struct frame_vector *vec);

static inline unsigned int frame_vector_count(struct frame_vector *vec)
{
	return vec->nr_frames;
}

static inline struct page **frame_vector_pages(struct frame_vector *vec)
{
	if (vec->is_pfns) {
		int err = frame_vector_to_pages(vec);

		if (err)
			return ERR_PTR(err);
	}
	return (struct page **)(vec->ptrs);
}

static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
{
	if (!vec->is_pfns)
		frame_vector_to_pfns(vec);
	return (unsigned long *)(vec->ptrs);
}
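
/*
 * Hedged frame_vector lifecycle sketch, as used by drivers that map
 * user memory which may be backed by either normal pages or raw PFNs:
 *
 *	struct frame_vector *vec = frame_vector_create(nr);
 *
 *	if (!vec)
 *		return -ENOMEM;
 *	if (get_vaddr_frames(start, nr, FOLL_WRITE, vec) > 0) {
 *		...use frame_vector_pages(vec) or frame_vector_pfns(vec)...
 *		put_vaddr_frames(vec);
 *	}
 *	frame_vector_destroy(vec);
 */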

struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
			struct page **pages);
int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);

extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned int offset,
			      unsigned int length);

void __set_page_dirty(struct page *, struct address_space *, int warn);
int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_cleaned(struct page *page, struct address_space *mapping,
			  struct bdi_writeback *wb);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
void __cancel_dirty_page(struct page *page);
static inline void cancel_dirty_page(struct page *page)
{
	/* Avoid atomic ops, locking, etc. if not actually dirty. */
	if (PageDirty(page))
		__cancel_dirty_page(page);
}
int clear_page_dirty_for_io(struct page *page);

int get_cmdline(struct task_struct *task, char *buffer, int buflen);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks);
extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end, pgprot_t newprot,
			      int dirty_accountable, int prot_numa);
extern int mprotect_fixup(struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

/*
 * doesn't attempt to fault and will return short.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages);

/*
 * per-process(per-mm_struct) statistics.
 */
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = atomic_long_read(&mm->rss_stat.count[member]);

#ifdef SPLIT_RSS_COUNTING
	/*
	 * counter is updated in asynchronous manner and may go to minus.
	 * But it's never be expected number for users.
	 */
	if (val < 0)
		val = 0;
#endif
	return (unsigned long)val;
}

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	atomic_long_add(value, &mm->rss_stat.count[member]);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_inc(&mm->rss_stat.count[member]);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_dec(&mm->rss_stat.count[member]);
}

/* Optimized variant when page is already known not to be PageAnon */
static inline int mm_counter_file(struct page *page)
{
	if (PageSwapBacked(page))
		return MM_SHMEMPAGES;
	return MM_FILEPAGES;
}

static inline int mm_counter(struct page *page)
{
	if (PageAnon(page))
		return MM_ANONPAGES;
	return mm_counter_file(page);
}

static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES) +
		get_mm_counter(mm, MM_SHMEMPAGES);
}

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if ((mm)->hiwater_rss < _rss)
		(mm)->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
{
	mm->hiwater_rss = get_mm_rss(mm);
}

static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

	if (*maxrss < hiwater_rss)
		*maxrss = hiwater_rss;
}

#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pte_devmap(pte_t pte)
{
	return 0;
}
#endif

int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			       spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
				    spinlock_t **ptl)
{
	pte_t *ptep;
	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
	return ptep;
}
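
/*
 * Hedged usage sketch: get_locked_pte() walks (and if necessary
 * allocates) page tables down to the PTE covering 'addr' and returns
 * with the PTE lock held; the caller must drop it with
 * pte_unmap_unlock():
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = get_locked_pte(mm, addr, &ptl);
 *
 *	if (!pte)
 *		return -ENOMEM;
 *	...install or inspect *pte...
 *	pte_unmap_unlock(pte, ptl);
 */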

#ifdef __PAGETABLE_P4D_FOLDED
static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
						unsigned long address)
{
	return 0;
}
#else
int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
						unsigned long address)
{
	return 0;
}
static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
static inline void mm_dec_nr_puds(struct mm_struct *mm) {}

#else
int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);

static inline void mm_inc_nr_puds(struct mm_struct *mm)
{
	if (mm_pud_folded(mm))
		return;
	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_puds(struct mm_struct *mm)
{
	if (mm_pud_folded(mm))
		return;
	atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
#endif

#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}

static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}

#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);

static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
	if (mm_pmd_folded(mm))
		return;
	atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
	if (mm_pmd_folded(mm))
		return;
	atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
#endif

#ifdef CONFIG_MMU
static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->pgtables_bytes, 0);
}

static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
	return atomic_long_read(&mm->pgtables_bytes);
}

static inline void mm_inc_nr_ptes(struct mm_struct *mm)
{
	atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_ptes(struct mm_struct *mm)
{
	atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}
#else

static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
	return 0;
}

static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
#endif

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
int __pte_alloc_kernel(pmd_t *pmd);

/*
 * The following ifdef needed to get the 4level-fixup.h header to work.
 * Remove it when 4level-fixup.h has been removed.
 */
#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)

#ifndef __ARCH_HAS_5LEVEL_HACK
static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
		unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
		NULL : p4d_offset(pgd, address);
}

static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
		unsigned long address)
{
	return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
		NULL : pud_offset(p4d, address);
}
#endif /* !__ARCH_HAS_5LEVEL_HACK */

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
		NULL: pmd_offset(pud, address);
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */

#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
extern bool ptlock_alloc(struct page *page);
extern void ptlock_free(struct page *page);

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return page->ptl;
}
#else /* ALLOC_SPLIT_PTLOCKS */
static inline void ptlock_cache_init(void)
{
}

static inline bool ptlock_alloc(struct page *page)
{
	return true;
}

static inline void ptlock_free(struct page *page)
{
}

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return &page->ptl;
}
#endif /* ALLOC_SPLIT_PTLOCKS */

static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_page(*pmd));
}

static inline bool ptlock_init(struct page *page)
{
	/*
	 * prep_new_page() initializes page->private (and therefore
	 * page->ptl) with 0. Make sure nobody took it in use in between.
	 * The lock storage is shared with other uses of page->private,
	 * so it must still be zero when a page-table page reaches here.
	 */
	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
	if (!ptlock_alloc(page))
		return false;
	spin_lock_init(ptlock_ptr(page));
	return true;
}

#else /* !USE_SPLIT_PTE_PTLOCKS */
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct page *page) { return true; }
static inline void ptlock_free(struct page *page) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */

static inline void pgtable_init(void)
{
	ptlock_cache_init();
	pgtable_cache_init();
}

static inline bool pgtable_page_ctor(struct page *page)
{
	if (!ptlock_init(page))
		return false;
	__SetPageTable(page);
	inc_zone_page_state(page, NR_PAGETABLE);
	return true;
}

static inline void pgtable_page_dtor(struct page *page)
{
	ptlock_free(page);
	__ClearPageTable(page);
	dec_zone_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)

#define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))

#define pte_alloc_map(mm, pmd, address)			\
	(pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
	(pte_alloc(mm, pmd) ?				\
		 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)			\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
		NULL: pte_offset_kernel(pmd, address))
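
/*
 * Hedged sketch of the canonical PTE-range walk built from the macros
 * above: map and lock once, iterate over the PTEs, then unmap and
 * unlock using the first mapped PTE:
 *
 *	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *
 *	do {
 *		...handle *pte...
 *	} while (pte++, addr += PAGE_SIZE, addr != end);
 *	pte_unmap_unlock(pte - 1, ptl);
 */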

#if USE_SPLIT_PMD_PTLOCKS

static struct page *pmd_to_page(pmd_t *pmd)
{
	unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	return virt_to_page((void *)((unsigned long) pmd & mask));
}

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_to_page(pmd));
}

static inline bool pgtable_pmd_page_ctor(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	page->pmd_huge_pte = NULL;
#endif
	return ptlock_init(page);
}

static inline void pgtable_pmd_page_dtor(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
#endif
	ptlock_free(page);
}

#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)

#else

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}

static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
static inline void pgtable_pmd_page_dtor(struct page *page) {}

#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)

#endif

static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl = pmd_lockptr(mm, pmd);
	spin_lock(ptl);
	return ptl;
}

/*
 * No scalability reason to split PUD locks yet, but follow the same
 * pattern as the PMD locks to make it easier if we decide to.  The VM
 * should not be considered ready to switch to split PUD locks yet;
 * there may be places which need to be converted from page_table_lock.
 */
static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
{
	return &mm->page_table_lock;
}

static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
{
	spinlock_t *ptl = pud_lockptr(mm, pud);

	spin_lock(ptl);
	return ptl;
}

extern void __init pagecache_init(void);
extern void free_area_init(unsigned long * zones_size);
extern void __init free_area_init_node(int nid, unsigned long * zones_size,
		unsigned long zone_start_pfn, unsigned long *zholes_size);
extern void free_initmem(void);

/*
 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
 * into the buddy system. The freed pages will be poisoned with pattern
 * "poison" if it's within range [0, UCHAR_MAX].
 * Return pages freed into the buddy system.
 */
extern unsigned long free_reserved_area(void *start, void *end,
					int poison, const char *s);

#ifdef CONFIG_HIGHMEM
/*
 * Free a highmem page into the buddy system, adjusting totalhigh_pages
 * and totalram_pages.
 */
extern void free_highmem_page(struct page *page);
#endif

extern void adjust_managed_page_count(struct page *page, long count);
extern void mem_init_print_info(const char *str);

extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);

/* Free the reserved page into the buddy system, so it gets managed. */
static inline void __free_reserved_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}

static inline void free_reserved_page(struct page *page)
{
	__free_reserved_page(page);
	adjust_managed_page_count(page, 1);
}

static inline void mark_page_reserved(struct page *page)
{
	SetPageReserved(page);
	adjust_managed_page_count(page, -1);
}

/*
 * Default method to free all the __init memory into the buddy system.
 * The freed pages will be poisoned with pattern "poison" if it's within
 * range [0, UCHAR_MAX].
 * Return pages freed into the buddy system.
 */
static inline unsigned long free_initmem_default(int poison)
{
	extern char __init_begin[], __init_end[];

	return free_reserved_area(&__init_begin, &__init_end,
				  poison, "unused kernel");
}

static inline unsigned long get_num_physpages(void)
{
	int nid;
	unsigned long phys_pages = 0;

	for_each_online_node(nid)
		phys_pages += node_present_pages(nid);

	return phys_pages;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise
 * its zones, allocate the backing mem_map and account for memory holes
 * in an architecture-independent manner, instead of building the
 * zones_size[] and zholes_size[] arrays and passing them to
 * free_area_init_node().
 *
 * An architecture is expected to register the ranges of page frames
 * backed by physical memory with memblock_add[_node]() and then call
 * free_area_init_nodes(), passing in the PFN each zone ends at:
 *
 *	unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma,
 *						max_normal_pfn, max_highmem_pfn};
 *	for_each_valid_physical_page_range()
 *		memblock_add_node(base, size, nid)
 *	free_area_init_nodes(max_zone_pfns);
 *
 * free_bootmem_with_active_regions() calls free_bootmem_node() for each
 * registered physical page range.  Similarly,
 * sparse_memory_present_with_active_regions() calls memory_present()
 * for each range when SPARSEMEM is enabled.
 */
extern void free_area_init_nodes(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
						unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
						unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
						unsigned long max_low_pfn);
extern void sparse_memory_present_with_active_regions(int nid);

#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
    !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
static inline int __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state)
{
	return 0;
}
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
/* there is a per-arch backend function. */
extern int __meminit __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state);
#endif

#if !defined(CONFIG_FLAT_NODE_MEM_MAP)
void zero_resv_unavail(void);
#else
static inline void zero_resv_unavail(void) {}
#endif
2224
2225extern void set_dma_reserve(unsigned long new_dma_reserve);
2226extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
2227 enum memmap_context, struct vmem_altmap *);
2228extern void setup_per_zone_wmarks(void);
2229extern int __meminit init_per_zone_wmark_min(void);
2230extern void mem_init(void);
2231extern void __init mmap_init(void);
2232extern void show_mem(unsigned int flags, nodemask_t *nodemask);
2233extern long si_mem_available(void);
2234extern void si_meminfo(struct sysinfo * val);
2235extern void si_meminfo_node(struct sysinfo *val, int nid);
2236#ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
2237extern unsigned long arch_reserved_kernel_pages(void);
2238#endif
2239
2240extern __printf(3, 4)
2241void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
2242
2243extern void setup_per_cpu_pageset(void);
2244
2245extern void zone_pcp_update(struct zone *zone);
2246extern void zone_pcp_reset(struct zone *zone);
2247
2248
2249extern int min_free_kbytes;
2250extern int watermark_boost_factor;
2251extern int watermark_scale_factor;
2252
2253
2254extern atomic_long_t mmap_pages_allocated;
2255extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
2256
2257
void vma_interval_tree_insert(struct vm_area_struct *node,
			      struct rb_root_cached *root);
void vma_interval_tree_insert_after(struct vm_area_struct *node,
				    struct vm_area_struct *prev,
				    struct rb_root_cached *root);
void vma_interval_tree_remove(struct vm_area_struct *node,
			      struct rb_root_cached *root);
struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
				unsigned long start, unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
				unsigned long start, unsigned long last);

#define vma_interval_tree_foreach(vma, root, start, last)		\
	for (vma = vma_interval_tree_iter_first(root, start, last);	\
	     vma; vma = vma_interval_tree_iter_next(vma, start, last))

void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
				   struct rb_root_cached *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
				   struct rb_root_cached *root);
struct anon_vma_chain *
anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
				  unsigned long start, unsigned long last);
struct anon_vma_chain *anon_vma_interval_tree_iter_next(
	struct anon_vma_chain *node, unsigned long start, unsigned long last);
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
#endif

#define anon_vma_interval_tree_foreach(avc, root, start, last)		 \
	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
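
/*
 * Illustrative sketch (not part of the mainline header): walking every VMA
 * that maps a given file range, in the style of the rmap code.  Each vma the
 * loop yields overlaps [first_pgoff, last_pgoff] in page offsets; the caller
 * is assumed to hold the relevant i_mmap lock, and do_something_to() is a
 * hypothetical helper.
 *
 *	struct vm_area_struct *vma;
 *
 *	vma_interval_tree_foreach(vma, &mapping->i_mmap,
 *				  first_pgoff, last_pgoff)
 *		do_something_to(vma);
 */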

/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
	struct vm_area_struct *expand);
static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
{
	return __vma_adjust(vma, start, end, pgoff, insert, NULL);
}
extern struct vm_area_struct *vma_merge(struct mm_struct *,
	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
	struct mempolicy *, struct vm_userfaultfd_ctx);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
	unsigned long addr, int new_below);
extern int split_vma(struct mm_struct *, struct vm_area_struct *,
	unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
	struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);

/* Check that growth of the data segment (brk and friends) stays within RLIMIT_DATA. */
static inline int check_data_rlimit(unsigned long rlim,
				    unsigned long new,
				    unsigned long start,
				    unsigned long end_data,
				    unsigned long start_data)
{
	if (rlim < RLIM_INFINITY) {
		if (((new - start) + (end_data - start_data)) > rlim)
			return -ENOSPC;
	}

	return 0;
}
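
/*
 * Worked example (illustrative): with rlim = 8 MiB, an existing data segment
 * of 6 MiB (end_data - start_data) and a brk() that would grow it by a
 * further 3 MiB (new - start), 6 MiB + 3 MiB = 9 MiB > 8 MiB, so the check
 * above fails with -ENOSPC.
 */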

extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);

extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);
extern struct file *get_task_exe_file(struct task_struct *task);

extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);

extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
				   const struct vm_special_mapping *sm);
extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags,
				   const struct vm_special_mapping *spec);
/* This is an obsolete alternative to _install_special_mapping. */
extern int install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags, struct page **pages);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long mmap_region(struct file *file, unsigned long addr,
	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
	struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
	struct list_head *uf);
extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
		       struct list_head *uf, bool downgrade);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
		     struct list_head *uf);

static inline unsigned long
do_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	unsigned long pgoff, unsigned long *populate,
	struct list_head *uf)
{
	return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf);
}

#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
			 int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
{
	/* Ignore errors */
	(void) __mm_populate(addr, len, 1);
}
#else
static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif

/* These take the mm semaphore themselves */
extern int __must_check vm_brk(unsigned long, unsigned long);
extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
					  unsigned long, unsigned long,
					  unsigned long, unsigned long);
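
/*
 * Illustrative sketch (not part of the mainline header): mapping a file into
 * the current task from kernel code via vm_mmap().  The arguments mirror
 * mmap(2): address hint, length, protection, flags, file offset.
 *
 *	unsigned long addr;
 *
 *	addr = vm_mmap(file, 0, size, PROT_READ, MAP_SHARED, 0);
 *	if (IS_ERR_VALUE(addr))
 *		return (int)addr;
 */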

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
};

extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);

/*
 * Search for an unmapped address range.
 *
 * We are looking for a range that:
 * - does not intersect with any VMA;
 * - is contained within the [low_limit, high_limit) interval;
 * - is at least the desired size.
 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
 */
static inline unsigned long
vm_unmapped_area(struct vm_unmapped_area_info *info)
{
	if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
		return unmapped_area_topdown(info);
	else
		return unmapped_area(info);
}
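
/*
 * Illustrative sketch (not part of the mainline header): how an arch's
 * arch_get_unmapped_area() typically fills in a bottom-up request; mm, len
 * and TASK_SIZE are assumed from that context.  Setting
 * VM_UNMAPPED_AREA_TOPDOWN in flags would search down from high_limit
 * instead.
 *
 *	struct vm_unmapped_area_info info;
 *
 *	info.flags = 0;
 *	info.length = len;
 *	info.low_limit = mm->mmap_base;
 *	info.high_limit = TASK_SIZE;
 *	info.align_mask = 0;
 *	info.align_offset = 0;
 *	return vm_unmapped_area(&info);
 */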

/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
				       loff_t lstart, loff_t lend);
extern void truncate_inode_pages_final(struct address_space *);

/* generic vm_area_ops exported for stackable file systems */
extern vm_fault_t filemap_fault(struct vm_fault *vmf);
extern void filemap_map_pages(struct vm_fault *vmf,
		pgoff_t start_pgoff, pgoff_t end_pgoff);
extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);

/* mm/page-writeback.c */
int __must_check write_one_page(struct page *page);
void task_dirty_inc(struct task_struct *tsk);

/* readahead.c */
#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)

int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read);

void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra,
			       struct file *filp,
			       pgoff_t offset,
			       unsigned long size);

void page_cache_async_readahead(struct address_space *mapping,
				struct file_ra_state *ra,
				struct file *filp,
				struct page *pg,
				pgoff_t offset,
				unsigned long size);
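
/*
 * Illustrative sketch (not part of the mainline header): a buffered read path
 * kicking off readahead around a missing page, in the style of
 * generic_file_buffered_read(); filp, mapping, index and last_index are
 * assumed from that context.
 *
 *	page = find_get_page(mapping, index);
 *	if (!page) {
 *		page_cache_sync_readahead(mapping, &filp->f_ra, filp,
 *					  index, last_index - index);
 *		page = find_get_page(mapping, index);
 *	}
 */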

extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);

/* CONFIG_STACK_GROWSUP still needs to use expand_upwards() */
extern int expand_downwards(struct vm_area_struct *vma,
		unsigned long address);
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
  #define expand_upwards(vma, address) (0)
#endif

/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
					     struct vm_area_struct **pprev);

/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none.  Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma = find_vma(mm, start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}
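
/*
 * Illustrative sketch (not part of the mainline header): rejecting a request
 * whose range is already (partially) mapped; some part of
 * [start, start + len) intersects an existing VMA.
 *
 *	if (find_vma_intersection(mm, start, start + len))
 *		return -EEXIST;
 */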

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)	/* underflow: clamp to 0 */
			vm_start = 0;
	}
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)	/* overflow: clamp to top */
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

/* Look up the first VMA which exactly match the interval vm_start ... vm_end */
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
				unsigned long vm_start, unsigned long vm_end)
{
	struct vm_area_struct *vma = find_vma(mm, vm_start);

	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
		vma = NULL;

	return vma;
}

static inline bool range_in_vma(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	return (vma && vma->vm_start <= start && end <= vma->vm_end);
}

#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
#else
static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(0);
}
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
#endif

#ifdef CONFIG_NUMA_BALANCING
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
#endif

struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
				unsigned long num);
int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
				unsigned long num);
vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			pfn_t pfn);
vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
		unsigned long addr, pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);

static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
				unsigned long addr, struct page *page)
{
	int err = vm_insert_page(vma, addr, page);

	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static inline vm_fault_t vmf_error(int err)
{
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}
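
/*
 * Illustrative sketch (not part of the mainline header): a driver's
 * vm_operations_struct .fault handler built on vmf_insert_page();
 * my_lookup_page() is a hypothetical helper returning the backing page.
 *
 *	static vm_fault_t my_fault(struct vm_fault *vmf)
 *	{
 *		struct page *page = my_lookup_page(vmf->pgoff);
 *
 *		if (!page)
 *			return VM_FAULT_SIGBUS;
 *		return vmf_insert_page(vmf->vma, vmf->address, page);
 *	}
 */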

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags);

#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_TOUCH	0x02	/* mark page accessed */
#define FOLL_GET	0x04	/* do get_page on page */
#define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
#define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
#define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
				 * and return without waiting upon it */
#define FOLL_POPULATE	0x40	/* fault in page */
#define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
#define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
#define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
#define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
#define FOLL_TRIED	0x800	/* a retry, previous pass started an IO */
#define FOLL_MLOCK	0x1000	/* lock present pages */
#define FOLL_REMOTE	0x2000	/* we are working on non-current tsk/mm */
#define FOLL_COW	0x4000	/* internal GUP flag */
#define FOLL_ANON	0x8000	/* don't do file mappings */
#define FOLL_LONGTERM	0x10000	/* mapping lifetime is indefinite: see below */

/*
 * NOTE on FOLL_LONGTERM:
 *
 * FOLL_LONGTERM indicates that the page will be held for an indefinite time
 * period, often under userspace control.  This is in contrast with
 * iov_iter_get_pages(), whose usages are transient.
 *
 * FIXME: For pages which are part of a filesystem, mappings are subject to
 * the lifetime enforced by the filesystem, and we need guarantees that
 * longterm users like RDMA and V4L2 only establish mappings which respect
 * that lifetime.  For this reason FS DAX pages cannot currently be pinned
 * longterm, and CMA pages are migrated out of the CMA region before a
 * longterm pin is established.  Callers of get_user_pages() should therefore
 * pass FOLL_LONGTERM whenever the pin may outlive the system call that
 * created it.
 */

static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
{
	if (vm_fault & VM_FAULT_OOM)
		return -ENOMEM;
	if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
		return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
	if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
		return -EFAULT;
	return 0;
}
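
/*
 * Illustrative sketch (not part of the mainline header): translating a fault
 * result into an errno after faulting a page in, in the style of the GUP and
 * hugetlb code; vma, address, fault_flags and foll_flags are assumed from
 * that context.
 *
 *	ret = handle_mm_fault(vma, address, fault_flags);
 *	if (ret & VM_FAULT_ERROR) {
 *		int err = vm_fault_to_errno(ret, foll_flags);
 *
 *		if (err)
 *			return err;
 *	}
 */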

typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
			       unsigned long size, pte_fn_t fn, void *data);
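
/*
 * Illustrative sketch (not part of the mainline header): a pte_fn_t callback
 * that counts present PTEs across a range.  Both count_present() and "count"
 * are hypothetical; note apply_to_page_range() also allocates any missing
 * page tables over the range as a side effect.
 *
 *	static int count_present(pte_t *pte, unsigned long addr, void *data)
 *	{
 *		unsigned long *count = data;
 *
 *		if (pte_present(*pte))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	apply_to_page_range(mm, start, size, count_present, &count);
 */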

#ifdef CONFIG_PAGE_POISONING
extern bool page_poisoning_enabled(void);
extern void kernel_poison_pages(struct page *page, int numpages, int enable);
#else
static inline bool page_poisoning_enabled(void) { return false; }
static inline void kernel_poison_pages(struct page *page, int numpages,
					int enable) { }
#endif

#ifdef CONFIG_INIT_ON_ALLOC_DEFAULT_ON
DECLARE_STATIC_KEY_TRUE(init_on_alloc);
#else
DECLARE_STATIC_KEY_FALSE(init_on_alloc);
#endif
static inline bool want_init_on_alloc(gfp_t flags)
{
	if (static_branch_unlikely(&init_on_alloc) &&
	    !page_poisoning_enabled())
		return true;
	return flags & __GFP_ZERO;
}

#ifdef CONFIG_INIT_ON_FREE_DEFAULT_ON
DECLARE_STATIC_KEY_TRUE(init_on_free);
#else
DECLARE_STATIC_KEY_FALSE(init_on_free);
#endif
static inline bool want_init_on_free(void)
{
	return static_branch_unlikely(&init_on_free) &&
	       !page_poisoning_enabled();
}
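
/*
 * Illustrative sketch (not part of the mainline header): how the page
 * allocator consumes these predicates, in the style of post_alloc_hook();
 * zeroing is skipped when page poisoning already scribbles over the page.
 *
 *	if (want_init_on_alloc(gfp_flags))
 *		kernel_init_free_pages(page, 1 << order);
 */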

#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT
DECLARE_STATIC_KEY_TRUE(_debug_pagealloc_enabled);
#else
DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
#endif

static inline bool debug_pagealloc_enabled(void)
{
	if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
		return false;

	return static_branch_unlikely(&_debug_pagealloc_enabled);
}

#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
extern void __kernel_map_pages(struct page *page, int numpages, int enable);

static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
	__kernel_map_pages(page, numpages, enable);
}
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif	/* CONFIG_HIBERNATION */
#else	/* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
#endif	/* CONFIG_HIBERNATION */
#endif	/* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */

#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
extern int in_gate_area_no_mm(unsigned long addr);
extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
#else
static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}
static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}
#endif	/* __HAVE_ARCH_GATE_AREA */

extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);

#ifdef CONFIG_SYSCTL
extern int sysctl_drop_caches;
int drop_caches_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

void drop_slab(void);
void drop_slab_node(int nid);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char * arch_vma_name(struct vm_area_struct *vma);
#ifdef CONFIG_MMU
void print_vma_addr(char *prefix, unsigned long rip);
#else
static inline void print_vma_addr(char *prefix, unsigned long rip)
{
}
#endif

void *sparse_buffer_alloc(unsigned long size);
struct page * __populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
struct vmem_altmap;
void *vmemmap_alloc_block_buf(unsigned long size, int node);
void *altmap_alloc_block_buf(unsigned long size, struct vmem_altmap *altmap);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
			       int node);
int vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap);
#endif
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
				  unsigned long nr_pages);

enum mf_flags {
	MF_COUNT_INCREASED = 1 << 0,
	MF_ACTION_REQUIRED = 1 << 1,
	MF_MUST_KILL = 1 << 2,
	MF_SOFT_OFFLINE = 1 << 3,
};
extern int memory_failure(unsigned long pfn, int flags);
extern void memory_failure_queue(unsigned long pfn, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int get_hwpoison_page(struct page *page);
#define put_hwpoison_page(page)	put_page(page)
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(struct page *page, int flags);

/*
 * Error handlers for various types of pages.
 */
enum mf_result {
	MF_IGNORED,	/* Error: cannot be handled */
	MF_FAILED,	/* Error: handling failed */
	MF_DELAYED,	/* Will be handled later */
	MF_RECOVERED,	/* Successfully recovered */
};

enum mf_action_page_type {
	MF_MSG_KERNEL,
	MF_MSG_KERNEL_HIGH_ORDER,
	MF_MSG_SLAB,
	MF_MSG_DIFFERENT_COMPOUND,
	MF_MSG_POISONED_HUGE,
	MF_MSG_HUGE,
	MF_MSG_FREE_HUGE,
	MF_MSG_NON_PMD_HUGE,
	MF_MSG_UNMAP_FAILED,
	MF_MSG_DIRTY_SWAPCACHE,
	MF_MSG_CLEAN_SWAPCACHE,
	MF_MSG_DIRTY_MLOCKED_LRU,
	MF_MSG_CLEAN_MLOCKED_LRU,
	MF_MSG_DIRTY_UNEVICTABLE_LRU,
	MF_MSG_CLEAN_UNEVICTABLE_LRU,
	MF_MSG_DIRTY_LRU,
	MF_MSG_CLEAN_LRU,
	MF_MSG_TRUNCATED_LRU,
	MF_MSG_BUDDY,
	MF_MSG_BUDDY_2ND,
	MF_MSG_DAX,
	MF_MSG_UNKNOWN,
};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
			    unsigned long addr_hint,
			    unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src,
				unsigned long addr_hint,
				struct vm_area_struct *vma,
				unsigned int pages_per_huge_page);
extern long copy_huge_page_from_user(struct page *dst_page,
				const void __user *usr_src,
				unsigned int pages_per_huge_page,
				bool allow_pagefault);
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);

static inline unsigned int debug_guardpage_minorder(void)
{
	return _debug_guardpage_minorder;
}

static inline bool debug_guardpage_enabled(void)
{
	return static_branch_unlikely(&_debug_guardpage_enabled);
}

static inline bool page_is_guard(struct page *page)
{
	if (!debug_guardpage_enabled())
		return false;

	return PageGuard(page);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool debug_guardpage_enabled(void) { return false; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif	/* CONFIG_DEBUG_PAGEALLOC */

#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else
static inline void setup_nr_node_ids(void) {}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */