/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LINUX_MM_H
3#define _LINUX_MM_H
4
5#include <linux/errno.h>
6
7#ifdef __KERNEL__
8
9#include <linux/mmdebug.h>
10#include <linux/gfp.h>
11#include <linux/bug.h>
12#include <linux/list.h>
13#include <linux/mmzone.h>
14#include <linux/rbtree.h>
15#include <linux/atomic.h>
16#include <linux/debug_locks.h>
17#include <linux/mm_types.h>
18#include <linux/range.h>
19#include <linux/pfn.h>
20#include <linux/percpu-refcount.h>
21#include <linux/bit_spinlock.h>
22#include <linux/shrinker.h>
23#include <linux/resource.h>
24#include <linux/page_ext.h>
25#include <linux/err.h>
26#include <linux/page_ref.h>
27#include <linux/memremap.h>
28#include <linux/overflow.h>
29#include <linux/sizes.h>
30
31struct mempolicy;
32struct anon_vma;
33struct anon_vma_chain;
34struct file_ra_state;
35struct user_struct;
36struct writeback_control;
37struct bdi_writeback;
38
39void init_mm_internals(void);
40
41#ifndef CONFIG_NEED_MULTIPLE_NODES
42extern unsigned long max_mapnr;
43
44static inline void set_max_mapnr(unsigned long limit)
45{
46 max_mapnr = limit;
47}
48#else
49static inline void set_max_mapnr(unsigned long limit) { }
50#endif
51
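/*
 * _totalram_pages is the number of usable RAM pages in the system.  It is
 * only updated through the totalram_pages_*() helpers below, so readers can
 * use the plain totalram_pages() accessor without extra locking.
 */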
52extern atomic_long_t _totalram_pages;
53static inline unsigned long totalram_pages(void)
54{
55 return (unsigned long)atomic_long_read(&_totalram_pages);
56}
57
58static inline void totalram_pages_inc(void)
59{
60 atomic_long_inc(&_totalram_pages);
61}
62
63static inline void totalram_pages_dec(void)
64{
65 atomic_long_dec(&_totalram_pages);
66}
67
68static inline void totalram_pages_add(long count)
69{
70 atomic_long_add(count, &_totalram_pages);
71}
72
73static inline void totalram_pages_set(long val)
74{
75 atomic_long_set(&_totalram_pages, val);
76}
77
78extern void * high_memory;
79extern int page_cluster;
80
81#ifdef CONFIG_SYSCTL
82extern int sysctl_legacy_va_layout;
83#else
84#define sysctl_legacy_va_layout 0
85#endif
86
87#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
88extern const int mmap_rnd_bits_min;
89extern const int mmap_rnd_bits_max;
90extern int mmap_rnd_bits __read_mostly;
91#endif
92#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
93extern const int mmap_rnd_compat_bits_min;
94extern const int mmap_rnd_compat_bits_max;
95extern int mmap_rnd_compat_bits __read_mostly;
96#endif
97
98#include <asm/page.h>
99#include <asm/pgtable.h>
100#include <asm/processor.h>
101
102
103
104
105
106
107
108
109#ifndef untagged_addr
110#define untagged_addr(addr) (addr)
111#endif
112
113#ifndef __pa_symbol
114#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
115#endif
116
117#ifndef page_to_virt
118#define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x)))
119#endif
120
121#ifndef lm_alias
122#define lm_alias(x) __va(__pa_symbol(x))
123#endif
124
125
126
127
128
129
130
131
132#ifndef mm_forbids_zeropage
133#define mm_forbids_zeropage(X) (0)
134#endif
135
136
137
138
139
140
141
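/*
 * On 64-bit, mm_zero_struct_page() clears a struct page with a short run of
 * unrolled stores instead of calling memset().  The BUILD_BUG_ON()s pin
 * sizeof(struct page) to a multiple of 8 between 56 and 80 bytes, and the
 * switch cases below deliberately fall through so larger sizes clear their
 * extra trailing words first.
 */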
142#if BITS_PER_LONG == 64
143
144
145
146
147
148
149#define mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
150static inline void __mm_zero_struct_page(struct page *page)
151{
152 unsigned long *_pp = (void *)page;
153
154
155 BUILD_BUG_ON(sizeof(struct page) & 7);
156 BUILD_BUG_ON(sizeof(struct page) < 56);
157 BUILD_BUG_ON(sizeof(struct page) > 80);
158
159 switch (sizeof(struct page)) {
160 case 80:
161 _pp[9] = 0;
162 case 72:
163 _pp[8] = 0;
164 case 64:
165 _pp[7] = 0;
166 case 56:
167 _pp[6] = 0;
168 _pp[5] = 0;
169 _pp[4] = 0;
170 _pp[3] = 0;
171 _pp[2] = 0;
172 _pp[1] = 0;
173 _pp[0] = 0;
174 }
175}
176#else
177#define mm_zero_struct_page(pp) ((void)memset((pp), 0, sizeof(struct page)))
178#endif
179
180
195
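/*
 * Default limit on the number of map areas per mm.  The margin below
 * USHRT_MAX is kept because ELF core dumps record the map count in a
 * 16-bit field and need room for a few extra mappings.
 */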
196#define MAPCOUNT_ELF_CORE_MARGIN (5)
197#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
198
199extern int sysctl_max_map_count;
200
201extern unsigned long sysctl_user_reserve_kbytes;
202extern unsigned long sysctl_admin_reserve_kbytes;
203
204extern int sysctl_overcommit_memory;
205extern int sysctl_overcommit_ratio;
206extern unsigned long sysctl_overcommit_kbytes;
207
208extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
209 size_t *, loff_t *);
210extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
211 size_t *, loff_t *);
212
213#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
214
215
216#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
217
218
219#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
220
221#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
222
223
224
225
226
227
228
229
230
231
232struct vm_area_struct *vm_area_alloc(struct mm_struct *);
233struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
234void vm_area_free(struct vm_area_struct *);
235
236#ifndef CONFIG_MMU
237extern struct rb_root nommu_region_tree;
238extern struct rw_semaphore nommu_region_sem;
239
240extern unsigned int kobjsize(const void *objp);
241#endif
242
243
244
245
246
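/*
 * vm_flags in vm_area_struct, see mm_types.h.
 * When changing, update also include/trace/events/mmflags.h.
 */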
247#define VM_NONE 0x00000000
248
249#define VM_READ 0x00000001
250#define VM_WRITE 0x00000002
251#define VM_EXEC 0x00000004
252#define VM_SHARED 0x00000008
253
254
255#define VM_MAYREAD 0x00000010
256#define VM_MAYWRITE 0x00000020
257#define VM_MAYEXEC 0x00000040
258#define VM_MAYSHARE 0x00000080
259
260#define VM_GROWSDOWN 0x00000100
261#define VM_UFFD_MISSING 0x00000200
262#define VM_PFNMAP 0x00000400
263#define VM_DENYWRITE 0x00000800
264#define VM_UFFD_WP 0x00001000
265
266#define VM_LOCKED 0x00002000
267#define VM_IO 0x00004000
268
269
270#define VM_SEQ_READ 0x00008000
271#define VM_RAND_READ 0x00010000
272
273#define VM_DONTCOPY 0x00020000
274#define VM_DONTEXPAND 0x00040000
275#define VM_LOCKONFAULT 0x00080000
276#define VM_ACCOUNT 0x00100000
277#define VM_NORESERVE 0x00200000
278#define VM_HUGETLB 0x00400000
279#define VM_SYNC 0x00800000
280#define VM_ARCH_1 0x01000000
281#define VM_WIPEONFORK 0x02000000
282#define VM_DONTDUMP 0x04000000
283
284#ifdef CONFIG_MEM_SOFT_DIRTY
285# define VM_SOFTDIRTY 0x08000000
286#else
287# define VM_SOFTDIRTY 0
288#endif
289
290#define VM_MIXEDMAP 0x10000000
291#define VM_HUGEPAGE 0x20000000
292#define VM_NOHUGEPAGE 0x40000000
293#define VM_MERGEABLE 0x80000000
294
295#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
296#define VM_HIGH_ARCH_BIT_0 32
297#define VM_HIGH_ARCH_BIT_1 33
298#define VM_HIGH_ARCH_BIT_2 34
299#define VM_HIGH_ARCH_BIT_3 35
300#define VM_HIGH_ARCH_BIT_4 36
301#define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0)
302#define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1)
303#define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2)
304#define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3)
305#define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4)
306#endif
307
308#ifdef CONFIG_ARCH_HAS_PKEYS
309# define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0
310# define VM_PKEY_BIT0 VM_HIGH_ARCH_0
311# define VM_PKEY_BIT1 VM_HIGH_ARCH_1
312# define VM_PKEY_BIT2 VM_HIGH_ARCH_2
313# define VM_PKEY_BIT3 VM_HIGH_ARCH_3
314#ifdef CONFIG_PPC
315# define VM_PKEY_BIT4 VM_HIGH_ARCH_4
316#else
317# define VM_PKEY_BIT4 0
318#endif
319#endif
320
321#if defined(CONFIG_X86)
322# define VM_PAT VM_ARCH_1
323#elif defined(CONFIG_PPC)
324# define VM_SAO VM_ARCH_1
325#elif defined(CONFIG_PARISC)
326# define VM_GROWSUP VM_ARCH_1
327#elif defined(CONFIG_IA64)
328# define VM_GROWSUP VM_ARCH_1
329#elif defined(CONFIG_SPARC64)
330# define VM_SPARC_ADI VM_ARCH_1
331# define VM_ARCH_CLEAR VM_SPARC_ADI
332#elif !defined(CONFIG_MMU)
333# define VM_MAPPED_COPY VM_ARCH_1
334#endif
335
336#if defined(CONFIG_X86_INTEL_MPX)
337
338# define VM_MPX VM_HIGH_ARCH_4
339#else
340# define VM_MPX VM_NONE
341#endif
342
343#ifndef VM_GROWSUP
344# define VM_GROWSUP VM_NONE
345#endif
346
347
348#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ)
349
350#ifndef VM_STACK_DEFAULT_FLAGS
351#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
352#endif
353
354#ifdef CONFIG_STACK_GROWSUP
355#define VM_STACK VM_GROWSUP
356#else
357#define VM_STACK VM_GROWSDOWN
358#endif
359
360#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
361
362
363
364
365
366#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
367
368
369#define VM_INIT_DEF_MASK VM_NOHUGEPAGE
370
371
372#define VM_LOCKED_CLEAR_MASK (~(VM_LOCKED | VM_LOCKONFAULT))
373
374
375#ifndef VM_ARCH_CLEAR
376# define VM_ARCH_CLEAR VM_NONE
377#endif
378#define VM_FLAGS_CLEAR (ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)
379
380
381
382
383
384extern pgprot_t protection_map[16];
385
386#define FAULT_FLAG_WRITE 0x01
387#define FAULT_FLAG_MKWRITE 0x02
388#define FAULT_FLAG_ALLOW_RETRY 0x04
389#define FAULT_FLAG_RETRY_NOWAIT 0x08
390#define FAULT_FLAG_KILLABLE 0x10
391#define FAULT_FLAG_TRIED 0x20
392#define FAULT_FLAG_USER 0x40
393#define FAULT_FLAG_REMOTE 0x80
394#define FAULT_FLAG_INSTRUCTION 0x100
395
396#define FAULT_FLAG_TRACE \
397 { FAULT_FLAG_WRITE, "WRITE" }, \
398 { FAULT_FLAG_MKWRITE, "MKWRITE" }, \
399 { FAULT_FLAG_ALLOW_RETRY, "ALLOW_RETRY" }, \
400 { FAULT_FLAG_RETRY_NOWAIT, "RETRY_NOWAIT" }, \
401 { FAULT_FLAG_KILLABLE, "KILLABLE" }, \
402 { FAULT_FLAG_TRIED, "TRIED" }, \
403 { FAULT_FLAG_USER, "USER" }, \
404 { FAULT_FLAG_REMOTE, "REMOTE" }, \
405 { FAULT_FLAG_INSTRUCTION, "INSTRUCTION" }
406
416
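/*
 * vm_fault is filled in by the page fault code and handed to the vma's
 * ->fault() callback.  @vma, @address, @pgoff, @flags and @gfp_mask describe
 * the faulting access; @pmd/@pud and @orig_pte record the page-table context;
 * @cow_page is a page the handler may use for a private COW fault and @memcg
 * is the cgroup it was charged to; the handler returns the resolved page in
 * @page.  @pte and @ptl are set up when the mapped page-table entry is
 * needed, and @prealloc_pte holds a page table preallocated for
 * ->map_pages(), which runs in atomic context.
 */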
417struct vm_fault {
418 struct vm_area_struct *vma;
419 unsigned int flags;
420 gfp_t gfp_mask;
421 pgoff_t pgoff;
422 unsigned long address;
423 pmd_t *pmd;
424
425 pud_t *pud;
426
427
428 pte_t orig_pte;
429
430 struct page *cow_page;
431 struct mem_cgroup *memcg;
432 struct page *page;
433
434
435
436
437
438 pte_t *pte;
439
440
441
442 spinlock_t *ptl;
443
444
445
446 pgtable_t prealloc_pte;
447
448
449
450
451
452
453};
454
455
456enum page_entry_size {
457 PE_SIZE_PTE = 0,
458 PE_SIZE_PMD,
459 PE_SIZE_PUD,
460};
461
462
463
464
465
466
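/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */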
467struct vm_operations_struct {
468 void (*open)(struct vm_area_struct * area);
469 void (*close)(struct vm_area_struct * area);
470 int (*split)(struct vm_area_struct * area, unsigned long addr);
471 int (*mremap)(struct vm_area_struct * area);
472 vm_fault_t (*fault)(struct vm_fault *vmf);
473 vm_fault_t (*huge_fault)(struct vm_fault *vmf,
474 enum page_entry_size pe_size);
475 void (*map_pages)(struct vm_fault *vmf,
476 pgoff_t start_pgoff, pgoff_t end_pgoff);
477 unsigned long (*pagesize)(struct vm_area_struct * area);
478
479
480
481 vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
482
483
484 vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
485
486
487
488
489 int (*access)(struct vm_area_struct *vma, unsigned long addr,
490 void *buf, int len, int write);
491
492
493
494
495 const char *(*name)(struct vm_area_struct *vma);
496
497#ifdef CONFIG_NUMA
498
499
500
501
502
503
504
505 int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
506
516
517 struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
518 unsigned long addr);
519#endif
520
521
522
523
524
525 struct page *(*find_special_page)(struct vm_area_struct *vma,
526 unsigned long addr);
527};
528
529static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
530{
531 static const struct vm_operations_struct dummy_vm_ops = {};
532
533 memset(vma, 0, sizeof(*vma));
534 vma->vm_mm = mm;
535 vma->vm_ops = &dummy_vm_ops;
536 INIT_LIST_HEAD(&vma->anon_vma_chain);
537}
538
539static inline void vma_set_anonymous(struct vm_area_struct *vma)
540{
541 vma->vm_ops = NULL;
542}
543
544
545#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
546
547struct mmu_gather;
548struct inode;
549
550#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
551static inline int pmd_devmap(pmd_t pmd)
552{
553 return 0;
554}
555static inline int pud_devmap(pud_t pud)
556{
557 return 0;
558}
559static inline int pgd_devmap(pgd_t pgd)
560{
561 return 0;
562}
563#endif
564
565
566
567
568
569#include <linux/page-flags.h>
570#include <linux/huge_mm.h>
571
587
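/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */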
588static inline int put_page_testzero(struct page *page)
589{
590 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
591 return page_ref_dec_and_test(page);
592}
593
594
595
596
597
598
599
600static inline int get_page_unless_zero(struct page *page)
601{
602 return page_ref_add_unless(page, 1, 0);
603}
604
605extern int page_is_ram(unsigned long pfn);
606
607enum {
608 REGION_INTERSECTS,
609 REGION_DISJOINT,
610 REGION_MIXED,
611};
612
613int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
614 unsigned long desc);
615
616
617struct page *vmalloc_to_page(const void *addr);
618unsigned long vmalloc_to_pfn(const void *addr);
619
620
621
622
623
624
625
626static inline bool is_vmalloc_addr(const void *x)
627{
628#ifdef CONFIG_MMU
629 unsigned long addr = (unsigned long)x;
630
631 return addr >= VMALLOC_START && addr < VMALLOC_END;
632#else
633 return false;
634#endif
635}
636#ifdef CONFIG_MMU
637extern int is_vmalloc_or_module_addr(const void *x);
638#else
639static inline int is_vmalloc_or_module_addr(const void *x)
640{
641 return 0;
642}
643#endif
644
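/*
 * kvmalloc() and friends attempt a kmalloc() first and fall back to
 * vmalloc() when that fails, so memory obtained this way must be released
 * with kvfree().  kvmalloc_array()/kvcalloc() additionally guard against
 * multiplication overflow of n * size.
 */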
645extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
646static inline void *kvmalloc(size_t size, gfp_t flags)
647{
648 return kvmalloc_node(size, flags, NUMA_NO_NODE);
649}
650static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
651{
652 return kvmalloc_node(size, flags | __GFP_ZERO, node);
653}
654static inline void *kvzalloc(size_t size, gfp_t flags)
655{
656 return kvmalloc(size, flags | __GFP_ZERO);
657}
658
659static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
660{
661 size_t bytes;
662
663 if (unlikely(check_mul_overflow(n, size, &bytes)))
664 return NULL;
665
666 return kvmalloc(bytes, flags);
667}
668
669static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
670{
671 return kvmalloc_array(n, size, flags | __GFP_ZERO);
672}
673
674extern void kvfree(const void *addr);
675
676static inline atomic_t *compound_mapcount_ptr(struct page *page)
677{
678 return &page[1].compound_mapcount;
679}
680
681static inline int compound_mapcount(struct page *page)
682{
683 VM_BUG_ON_PAGE(!PageCompound(page), page);
684 page = compound_head(page);
685 return atomic_read(compound_mapcount_ptr(page)) + 1;
686}
687
688
689
690
691
692
693static inline void page_mapcount_reset(struct page *page)
694{
695 atomic_set(&(page)->_mapcount, -1);
696}
697
698int __page_mapcount(struct page *page);
699
700static inline int page_mapcount(struct page *page)
701{
702 VM_BUG_ON_PAGE(PageSlab(page), page);
703
704 if (unlikely(PageCompound(page)))
705 return __page_mapcount(page);
706 return atomic_read(&page->_mapcount) + 1;
707}
708
709#ifdef CONFIG_TRANSPARENT_HUGEPAGE
710int total_mapcount(struct page *page);
711int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
712#else
713static inline int total_mapcount(struct page *page)
714{
715 return page_mapcount(page);
716}
717static inline int page_trans_huge_mapcount(struct page *page,
718 int *total_mapcount)
719{
720 int mapcount = page_mapcount(page);
721 if (total_mapcount)
722 *total_mapcount = mapcount;
723 return mapcount;
724}
725#endif
726
727static inline struct page *virt_to_head_page(const void *x)
728{
729 struct page *page = virt_to_page(x);
730
731 return compound_head(page);
732}
733
734void __put_page(struct page *page);
735
736void put_pages_list(struct list_head *pages);
737
738void split_page(struct page *page, unsigned int order);
739
740
741
742
743
744
745typedef void compound_page_dtor(struct page *);
746
747
748enum compound_dtor_id {
749 NULL_COMPOUND_DTOR,
750 COMPOUND_PAGE_DTOR,
751#ifdef CONFIG_HUGETLB_PAGE
752 HUGETLB_PAGE_DTOR,
753#endif
754#ifdef CONFIG_TRANSPARENT_HUGEPAGE
755 TRANSHUGE_PAGE_DTOR,
756#endif
757 NR_COMPOUND_DTORS,
758};
759extern compound_page_dtor * const compound_page_dtors[];
760
761static inline void set_compound_page_dtor(struct page *page,
762 enum compound_dtor_id compound_dtor)
763{
764 VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
765 page[1].compound_dtor = compound_dtor;
766}
767
768static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
769{
770 VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
771 return compound_page_dtors[page[1].compound_dtor];
772}
773
774static inline unsigned int compound_order(struct page *page)
775{
776 if (!PageHead(page))
777 return 0;
778 return page[1].compound_order;
779}
780
781static inline void set_compound_order(struct page *page, unsigned int order)
782{
783 page[1].compound_order = order;
784}
785
786void free_compound_page(struct page *page);
787
788#ifdef CONFIG_MMU
789
790
791
792
793
794
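/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case we always want
 * pte_mkwrite, but get_user_pages() can cause write faults for mappings
 * that do not have VM_WRITE set.
 */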
795static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
796{
797 if (likely(vma->vm_flags & VM_WRITE))
798 pte = pte_mkwrite(pte);
799 return pte;
800}
801
802vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
803 struct page *page);
804vm_fault_t finish_fault(struct vm_fault *vmf);
805vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
806#endif
807
873
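/*
 * page->flags layout: the upper bits of page->flags carry the zone, node
 * (or section, for SPARSEMEM without VMEMMAP), last-cpupid and KASAN tag
 * fields.  The *_WIDTH values come from linux/page-flags-layout.h; the
 * macros below compute where each field sits and how to mask it out.
 */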
874#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
875#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
876#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
877#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
878#define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
879
880
881
882
883
884
885#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
886#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
887#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
888#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
889#define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))
890
891
892#ifdef NODE_NOT_IN_PAGE_FLAGS
893#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
894#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF)? \
895 SECTIONS_PGOFF : ZONES_PGOFF)
896#else
897#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT)
898#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF)? \
899 NODES_PGOFF : ZONES_PGOFF)
900#endif
901
902#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
903
904#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
905#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
906#endif
907
908#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
909#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
910#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
911#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1)
912#define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1)
913#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
914
915static inline enum zone_type page_zonenum(const struct page *page)
916{
917 return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
918}
919
920#ifdef CONFIG_ZONE_DEVICE
921static inline bool is_zone_device_page(const struct page *page)
922{
923 return page_zonenum(page) == ZONE_DEVICE;
924}
925extern void memmap_init_zone_device(struct zone *, unsigned long,
926 unsigned long, struct dev_pagemap *);
927#else
928static inline bool is_zone_device_page(const struct page *page)
929{
930 return false;
931}
932#endif
933
934#ifdef CONFIG_DEV_PAGEMAP_OPS
935void dev_pagemap_get_ops(void);
936void dev_pagemap_put_ops(void);
937void __put_devmap_managed_page(struct page *page);
938DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
939static inline bool put_devmap_managed_page(struct page *page)
940{
941 if (!static_branch_unlikely(&devmap_managed_key))
942 return false;
943 if (!is_zone_device_page(page))
944 return false;
945 switch (page->pgmap->type) {
946 case MEMORY_DEVICE_PRIVATE:
947 case MEMORY_DEVICE_PUBLIC:
948 case MEMORY_DEVICE_FS_DAX:
949 __put_devmap_managed_page(page);
950 return true;
951 default:
952 break;
953 }
954 return false;
955}
956
957static inline bool is_device_private_page(const struct page *page)
958{
959 return is_zone_device_page(page) &&
960 page->pgmap->type == MEMORY_DEVICE_PRIVATE;
961}
962
963static inline bool is_device_public_page(const struct page *page)
964{
965 return is_zone_device_page(page) &&
966 page->pgmap->type == MEMORY_DEVICE_PUBLIC;
967}
968
969#ifdef CONFIG_PCI_P2PDMA
970static inline bool is_pci_p2pdma_page(const struct page *page)
971{
972 return is_zone_device_page(page) &&
973 page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
974}
975#else
976static inline bool is_pci_p2pdma_page(const struct page *page)
977{
978 return false;
979}
980#endif
981
982#else
983static inline void dev_pagemap_get_ops(void)
984{
985}
986
987static inline void dev_pagemap_put_ops(void)
988{
989}
990
991static inline bool put_devmap_managed_page(struct page *page)
992{
993 return false;
994}
995
996static inline bool is_device_private_page(const struct page *page)
997{
998 return false;
999}
1000
1001static inline bool is_device_public_page(const struct page *page)
1002{
1003 return false;
1004}
1005
1006static inline bool is_pci_p2pdma_page(const struct page *page)
1007{
1008 return false;
1009}
1010#endif
1011
1012
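/*
 * page_ref_zero_or_close_to_overflow() is true when the page refcount is
 * zero or has wrapped into the range just below zero.  get_page() uses it
 * to catch refcount overflows and gets on freed pages with a VM_BUG_ON
 * instead of silently corrupting the count.
 */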
1013#define page_ref_zero_or_close_to_overflow(page) \
1014 ((unsigned int) page_ref_count(page) + 127u <= 127u)
1015
1016static inline void get_page(struct page *page)
1017{
1018 page = compound_head(page);
1019
1020
1021
1022
1023 VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
1024 page_ref_inc(page);
1025}
1026
1027static inline __must_check bool try_get_page(struct page *page)
1028{
1029 page = compound_head(page);
1030 if (WARN_ON_ONCE(page_ref_count(page) <= 0))
1031 return false;
1032 page_ref_inc(page);
1033 return true;
1034}
1035
1036static inline void put_page(struct page *page)
1037{
1038 page = compound_head(page);
1039
1040
1041
1042
1043
1044
1045
1046 if (put_devmap_managed_page(page))
1047 return;
1048
1049 if (put_page_testzero(page))
1050 __put_page(page);
1051}
1052
1067
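/*
 * Pages that were pinned via get_user_pages*() must be released via either
 * put_user_page() or one of the put_user_pages*() variants, rather than
 * put_page().  For now put_user_page() is a plain wrapper around put_page(),
 * but keeping the calls distinct allows gup-pinned pages to be tracked
 * separately later.  A typical (illustrative) pattern:
 *
 *	npages = get_user_pages_fast(start, nr, FOLL_WRITE, pages);
 *	... use the pages ...
 *	put_user_pages(pages, npages);
 */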
1068static inline void put_user_page(struct page *page)
1069{
1070 put_page(page);
1071}
1072
1073void put_user_pages_dirty(struct page **pages, unsigned long npages);
1074void put_user_pages_dirty_lock(struct page **pages, unsigned long npages);
1075void put_user_pages(struct page **pages, unsigned long npages);
1076
1077#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
1078#define SECTION_IN_PAGE_FLAGS
1079#endif
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089static inline int page_zone_id(struct page *page)
1090{
1091 return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
1092}
1093
1094#ifdef NODE_NOT_IN_PAGE_FLAGS
1095extern int page_to_nid(const struct page *page);
1096#else
1097static inline int page_to_nid(const struct page *page)
1098{
1099 struct page *p = (struct page *)page;
1100
1101 return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
1102}
1103#endif
1104
1105#ifdef CONFIG_NUMA_BALANCING
1106static inline int cpu_pid_to_cpupid(int cpu, int pid)
1107{
1108 return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
1109}
1110
1111static inline int cpupid_to_pid(int cpupid)
1112{
1113 return cpupid & LAST__PID_MASK;
1114}
1115
1116static inline int cpupid_to_cpu(int cpupid)
1117{
1118 return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
1119}
1120
1121static inline int cpupid_to_nid(int cpupid)
1122{
1123 return cpu_to_node(cpupid_to_cpu(cpupid));
1124}
1125
1126static inline bool cpupid_pid_unset(int cpupid)
1127{
1128 return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
1129}
1130
1131static inline bool cpupid_cpu_unset(int cpupid)
1132{
1133 return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
1134}
1135
1136static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
1137{
1138 return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
1139}
1140
1141#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
1142#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
1143static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
1144{
1145 return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
1146}
1147
1148static inline int page_cpupid_last(struct page *page)
1149{
1150 return page->_last_cpupid;
1151}
1152static inline void page_cpupid_reset_last(struct page *page)
1153{
1154 page->_last_cpupid = -1 & LAST_CPUPID_MASK;
1155}
1156#else
1157static inline int page_cpupid_last(struct page *page)
1158{
1159 return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
1160}
1161
1162extern int page_cpupid_xchg_last(struct page *page, int cpupid);
1163
1164static inline void page_cpupid_reset_last(struct page *page)
1165{
1166 page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
1167}
1168#endif
1169#else
1170static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
1171{
1172 return page_to_nid(page);
1173}
1174
1175static inline int page_cpupid_last(struct page *page)
1176{
1177 return page_to_nid(page);
1178}
1179
1180static inline int cpupid_to_nid(int cpupid)
1181{
1182 return -1;
1183}
1184
1185static inline int cpupid_to_pid(int cpupid)
1186{
1187 return -1;
1188}
1189
1190static inline int cpupid_to_cpu(int cpupid)
1191{
1192 return -1;
1193}
1194
1195static inline int cpu_pid_to_cpupid(int nid, int pid)
1196{
1197 return -1;
1198}
1199
1200static inline bool cpupid_pid_unset(int cpupid)
1201{
1202 return 1;
1203}
1204
1205static inline void page_cpupid_reset_last(struct page *page)
1206{
1207}
1208
1209static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
1210{
1211 return false;
1212}
1213#endif
1214
1215#ifdef CONFIG_KASAN_SW_TAGS
1216static inline u8 page_kasan_tag(const struct page *page)
1217{
1218 return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
1219}
1220
1221static inline void page_kasan_tag_set(struct page *page, u8 tag)
1222{
1223 page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
1224 page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
1225}
1226
1227static inline void page_kasan_tag_reset(struct page *page)
1228{
1229 page_kasan_tag_set(page, 0xff);
1230}
1231#else
1232static inline u8 page_kasan_tag(const struct page *page)
1233{
1234 return 0xff;
1235}
1236
1237static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
1238static inline void page_kasan_tag_reset(struct page *page) { }
1239#endif
1240
1241static inline struct zone *page_zone(const struct page *page)
1242{
1243 return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
1244}
1245
1246static inline pg_data_t *page_pgdat(const struct page *page)
1247{
1248 return NODE_DATA(page_to_nid(page));
1249}
1250
1251#ifdef SECTION_IN_PAGE_FLAGS
1252static inline void set_page_section(struct page *page, unsigned long section)
1253{
1254 page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
1255 page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
1256}
1257
1258static inline unsigned long page_to_section(const struct page *page)
1259{
1260 return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
1261}
1262#endif
1263
1264static inline void set_page_zone(struct page *page, enum zone_type zone)
1265{
1266 page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
1267 page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
1268}
1269
1270static inline void set_page_node(struct page *page, unsigned long node)
1271{
1272 page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
1273 page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
1274}
1275
1276static inline void set_page_links(struct page *page, enum zone_type zone,
1277 unsigned long node, unsigned long pfn)
1278{
1279 set_page_zone(page, zone);
1280 set_page_node(page, node);
1281#ifdef SECTION_IN_PAGE_FLAGS
1282 set_page_section(page, pfn_to_section_nr(pfn));
1283#endif
1284}
1285
1286#ifdef CONFIG_MEMCG
1287static inline struct mem_cgroup *page_memcg(struct page *page)
1288{
1289 return page->mem_cgroup;
1290}
1291static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
1292{
1293 WARN_ON_ONCE(!rcu_read_lock_held());
1294 return READ_ONCE(page->mem_cgroup);
1295}
1296#else
1297static inline struct mem_cgroup *page_memcg(struct page *page)
1298{
1299 return NULL;
1300}
1301static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
1302{
1303 WARN_ON_ONCE(!rcu_read_lock_held());
1304 return NULL;
1305}
1306#endif
1307
1308
1309
1310
1311#include <linux/vmstat.h>
1312
1313static __always_inline void *lowmem_page_address(const struct page *page)
1314{
1315 return page_to_virt(page);
1316}
1317
1318#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
1319#define HASHED_PAGE_VIRTUAL
1320#endif
1321
1322#if defined(WANT_PAGE_VIRTUAL)
1323static inline void *page_address(const struct page *page)
1324{
1325 return page->virtual;
1326}
1327static inline void set_page_address(struct page *page, void *address)
1328{
1329 page->virtual = address;
1330}
1331#define page_address_init() do { } while(0)
1332#endif
1333
1334#if defined(HASHED_PAGE_VIRTUAL)
1335void *page_address(const struct page *page);
1336void set_page_address(struct page *page, void *virtual);
1337void page_address_init(void);
1338#endif
1339
1340#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
1341#define page_address(page) lowmem_page_address(page)
1342#define set_page_address(page, address) do { } while(0)
1343#define page_address_init() do { } while(0)
1344#endif
1345
1346extern void *page_rmapping(struct page *page);
1347extern struct anon_vma *page_anon_vma(struct page *page);
1348extern struct address_space *page_mapping(struct page *page);
1349
1350extern struct address_space *__page_file_mapping(struct page *);
1351
1352static inline
1353struct address_space *page_file_mapping(struct page *page)
1354{
1355 if (unlikely(PageSwapCache(page)))
1356 return __page_file_mapping(page);
1357
1358 return page->mapping;
1359}
1360
1361extern pgoff_t __page_file_index(struct page *page);
1362
1363
1364
1365
1366
1367static inline pgoff_t page_index(struct page *page)
1368{
1369 if (unlikely(PageSwapCache(page)))
1370 return __page_file_index(page);
1371 return page->index;
1372}
1373
1374bool page_mapped(struct page *page);
1375struct address_space *page_mapping(struct page *page);
1376struct address_space *page_mapping_file(struct page *page);
1377
1378
1379
1380
1381
1382
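/*
 * page_is_pfmemalloc() reports whether the page was allocated from the
 * pfmemalloc (emergency) reserves.  page->index is reused to carry the
 * flag: set_page_pfmemalloc() is called by the page allocator on a freshly
 * allocated page and stores -1UL there; clear_page_pfmemalloc() resets it.
 */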
1383static inline bool page_is_pfmemalloc(struct page *page)
1384{
1385
1386
1387
1388
1389 return page->index == -1UL;
1390}
1391
1392
1393
1394
1395
1396static inline void set_page_pfmemalloc(struct page *page)
1397{
1398 page->index = -1UL;
1399}
1400
1401static inline void clear_page_pfmemalloc(struct page *page)
1402{
1403 page->index = 0;
1404}
1405
1406
1407
1408
1409extern void pagefault_out_of_memory(void);
1410
1411#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
1412
1413
1414
1415
1416
1417#define SHOW_MEM_FILTER_NODES (0x0001u)
1418
1419extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);
1420
1421extern bool can_do_mlock(void);
1422extern int user_shm_lock(size_t, struct user_struct *);
1423extern void user_shm_unlock(size_t, struct user_struct *);
1424
1425
1426
1427
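/*
 * Parameter block passed down to zap_pte_range in exceptional cases:
 * @check_mapping restricts unmapping to pages of that address_space, and
 * @first_index/@last_index bound the range of page->index values to unmap.
 */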
1428struct zap_details {
1429 struct address_space *check_mapping;
1430 pgoff_t first_index;
1431 pgoff_t last_index;
1432};
1433
1434struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
1435 pte_t pte, bool with_public_device);
1436#define vm_normal_page(vma, addr, pte) _vm_normal_page(vma, addr, pte, false)
1437
1438struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
1439 pmd_t pmd);
1440
1441void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1442 unsigned long size);
1443void zap_page_range(struct vm_area_struct *vma, unsigned long address,
1444 unsigned long size);
1445void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
1446 unsigned long start, unsigned long end);
1447
1471
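/*
 * struct mm_walk - callbacks and state for walk_page_range()/walk_page_vma().
 * @pud_entry, @pmd_entry and @pte_entry are called for entries at the
 * corresponding level; @pte_hole is called for holes in the address range;
 * @hugetlb_entry handles hugetlb entries; @test_walk lets the caller decide
 * whether to walk or skip the current vma.  @mm and @vma track the current
 * walk position and @private carries caller data to the callbacks.
 */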
1472struct mm_walk {
1473 int (*pud_entry)(pud_t *pud, unsigned long addr,
1474 unsigned long next, struct mm_walk *walk);
1475 int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
1476 unsigned long next, struct mm_walk *walk);
1477 int (*pte_entry)(pte_t *pte, unsigned long addr,
1478 unsigned long next, struct mm_walk *walk);
1479 int (*pte_hole)(unsigned long addr, unsigned long next,
1480 struct mm_walk *walk);
1481 int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
1482 unsigned long addr, unsigned long next,
1483 struct mm_walk *walk);
1484 int (*test_walk)(unsigned long addr, unsigned long next,
1485 struct mm_walk *walk);
1486 struct mm_struct *mm;
1487 struct vm_area_struct *vma;
1488 void *private;
1489};
1490
1491struct mmu_notifier_range;
1492
1493int walk_page_range(unsigned long addr, unsigned long end,
1494 struct mm_walk *walk);
1495int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
1496void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
1497 unsigned long end, unsigned long floor, unsigned long ceiling);
1498int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
1499 struct vm_area_struct *vma);
1500int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
1501 struct mmu_notifier_range *range,
1502 pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
1503int follow_pfn(struct vm_area_struct *vma, unsigned long address,
1504 unsigned long *pfn);
1505int follow_phys(struct vm_area_struct *vma, unsigned long address,
1506 unsigned int flags, unsigned long *prot, resource_size_t *phys);
1507int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
1508 void *buf, int len, int write);
1509
1510extern void truncate_pagecache(struct inode *inode, loff_t new);
1511extern void truncate_setsize(struct inode *inode, loff_t newsize);
1512void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
1513void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
1514int truncate_inode_page(struct address_space *mapping, struct page *page);
1515int generic_error_remove_page(struct address_space *mapping, struct page *page);
1516int invalidate_inode_page(struct page *page);
1517
1518#ifdef CONFIG_MMU
1519extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
1520 unsigned long address, unsigned int flags);
1521extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
1522 unsigned long address, unsigned int fault_flags,
1523 bool *unlocked);
1524void unmap_mapping_pages(struct address_space *mapping,
1525 pgoff_t start, pgoff_t nr, bool even_cows);
1526void unmap_mapping_range(struct address_space *mapping,
1527 loff_t const holebegin, loff_t const holelen, int even_cows);
1528#else
1529static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
1530 unsigned long address, unsigned int flags)
1531{
1532
1533 BUG();
1534 return VM_FAULT_SIGBUS;
1535}
1536static inline int fixup_user_fault(struct task_struct *tsk,
1537 struct mm_struct *mm, unsigned long address,
1538 unsigned int fault_flags, bool *unlocked)
1539{
1540
1541 BUG();
1542 return -EFAULT;
1543}
1544static inline void unmap_mapping_pages(struct address_space *mapping,
1545 pgoff_t start, pgoff_t nr, bool even_cows) { }
1546static inline void unmap_mapping_range(struct address_space *mapping,
1547 loff_t const holebegin, loff_t const holelen, int even_cows) { }
1548#endif
1549
1550static inline void unmap_shared_mapping_range(struct address_space *mapping,
1551 loff_t const holebegin, loff_t const holelen)
1552{
1553 unmap_mapping_range(mapping, holebegin, holelen, 0);
1554}
1555
1556extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
1557 void *buf, int len, unsigned int gup_flags);
1558extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1559 void *buf, int len, unsigned int gup_flags);
1560extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
1561 unsigned long addr, void *buf, int len, unsigned int gup_flags);
1562
1563long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
1564 unsigned long start, unsigned long nr_pages,
1565 unsigned int gup_flags, struct page **pages,
1566 struct vm_area_struct **vmas, int *locked);
1567long get_user_pages(unsigned long start, unsigned long nr_pages,
1568 unsigned int gup_flags, struct page **pages,
1569 struct vm_area_struct **vmas);
1570long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
1571 unsigned int gup_flags, struct page **pages, int *locked);
1572long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
1573 struct page **pages, unsigned int gup_flags);
1574
1575int get_user_pages_fast(unsigned long start, int nr_pages,
1576 unsigned int gup_flags, struct page **pages);
1577
1578
1579struct frame_vector {
1580 unsigned int nr_allocated;
1581 unsigned int nr_frames;
1582 bool got_ref;
1583 bool is_pfns;
1584 void *ptrs[0];
1585
1586
1587};
1588
1589struct frame_vector *frame_vector_create(unsigned int nr_frames);
1590void frame_vector_destroy(struct frame_vector *vec);
1591int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
1592 unsigned int gup_flags, struct frame_vector *vec);
1593void put_vaddr_frames(struct frame_vector *vec);
1594int frame_vector_to_pages(struct frame_vector *vec);
1595void frame_vector_to_pfns(struct frame_vector *vec);
1596
1597static inline unsigned int frame_vector_count(struct frame_vector *vec)
1598{
1599 return vec->nr_frames;
1600}
1601
1602static inline struct page **frame_vector_pages(struct frame_vector *vec)
1603{
1604 if (vec->is_pfns) {
1605 int err = frame_vector_to_pages(vec);
1606
1607 if (err)
1608 return ERR_PTR(err);
1609 }
1610 return (struct page **)(vec->ptrs);
1611}
1612
1613static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
1614{
1615 if (!vec->is_pfns)
1616 frame_vector_to_pfns(vec);
1617 return (unsigned long *)(vec->ptrs);
1618}
1619
1620struct kvec;
1621int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
1622 struct page **pages);
1623int get_kernel_page(unsigned long start, int write, struct page **pages);
1624struct page *get_dump_page(unsigned long addr);
1625
1626extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
1627extern void do_invalidatepage(struct page *page, unsigned int offset,
1628 unsigned int length);
1629
1630void __set_page_dirty(struct page *, struct address_space *, int warn);
1631int __set_page_dirty_nobuffers(struct page *page);
1632int __set_page_dirty_no_writeback(struct page *page);
1633int redirty_page_for_writepage(struct writeback_control *wbc,
1634 struct page *page);
1635void account_page_dirtied(struct page *page, struct address_space *mapping);
1636void account_page_cleaned(struct page *page, struct address_space *mapping,
1637 struct bdi_writeback *wb);
1638int set_page_dirty(struct page *page);
1639int set_page_dirty_lock(struct page *page);
1640void __cancel_dirty_page(struct page *page);
1641static inline void cancel_dirty_page(struct page *page)
1642{
1643
1644 if (PageDirty(page))
1645 __cancel_dirty_page(page);
1646}
1647int clear_page_dirty_for_io(struct page *page);
1648
1649int get_cmdline(struct task_struct *task, char *buffer, int buflen);
1650
1651static inline bool vma_is_anonymous(struct vm_area_struct *vma)
1652{
1653 return !vma->vm_ops;
1654}
1655
1656#ifdef CONFIG_SHMEM
1657
1658
1659
1660
1661bool vma_is_shmem(struct vm_area_struct *vma);
1662#else
1663static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
1664#endif
1665
1666int vma_is_stack_for_current(struct vm_area_struct *vma);
1667
1668extern unsigned long move_page_tables(struct vm_area_struct *vma,
1669 unsigned long old_addr, struct vm_area_struct *new_vma,
1670 unsigned long new_addr, unsigned long len,
1671 bool need_rmap_locks);
1672extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
1673 unsigned long end, pgprot_t newprot,
1674 int dirty_accountable, int prot_numa);
1675extern int mprotect_fixup(struct vm_area_struct *vma,
1676 struct vm_area_struct **pprev, unsigned long start,
1677 unsigned long end, unsigned long newflags);
1678
1679
1680
1681
1682int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
1683 struct page **pages);
1684
1685
1686
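/*
 * Per-mm RSS counters, kept in mm->rss_stat.  With SPLIT_RSS_COUNTING the
 * per-task deltas are only folded in by sync_mm_rss(), so a reader can
 * transiently observe a negative count; get_mm_counter() clamps such
 * values to zero.
 */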
1687static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
1688{
1689 long val = atomic_long_read(&mm->rss_stat.count[member]);
1690
1691#ifdef SPLIT_RSS_COUNTING
1692
1693
1694
1695
1696 if (val < 0)
1697 val = 0;
1698#endif
1699 return (unsigned long)val;
1700}
1701
1702static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
1703{
1704 atomic_long_add(value, &mm->rss_stat.count[member]);
1705}
1706
1707static inline void inc_mm_counter(struct mm_struct *mm, int member)
1708{
1709 atomic_long_inc(&mm->rss_stat.count[member]);
1710}
1711
1712static inline void dec_mm_counter(struct mm_struct *mm, int member)
1713{
1714 atomic_long_dec(&mm->rss_stat.count[member]);
1715}
1716
1717
1718static inline int mm_counter_file(struct page *page)
1719{
1720 if (PageSwapBacked(page))
1721 return MM_SHMEMPAGES;
1722 return MM_FILEPAGES;
1723}
1724
1725static inline int mm_counter(struct page *page)
1726{
1727 if (PageAnon(page))
1728 return MM_ANONPAGES;
1729 return mm_counter_file(page);
1730}
1731
1732static inline unsigned long get_mm_rss(struct mm_struct *mm)
1733{
1734 return get_mm_counter(mm, MM_FILEPAGES) +
1735 get_mm_counter(mm, MM_ANONPAGES) +
1736 get_mm_counter(mm, MM_SHMEMPAGES);
1737}
1738
1739static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
1740{
1741 return max(mm->hiwater_rss, get_mm_rss(mm));
1742}
1743
1744static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
1745{
1746 return max(mm->hiwater_vm, mm->total_vm);
1747}
1748
1749static inline void update_hiwater_rss(struct mm_struct *mm)
1750{
1751 unsigned long _rss = get_mm_rss(mm);
1752
1753 if ((mm)->hiwater_rss < _rss)
1754 (mm)->hiwater_rss = _rss;
1755}
1756
1757static inline void update_hiwater_vm(struct mm_struct *mm)
1758{
1759 if (mm->hiwater_vm < mm->total_vm)
1760 mm->hiwater_vm = mm->total_vm;
1761}
1762
1763static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
1764{
1765 mm->hiwater_rss = get_mm_rss(mm);
1766}
1767
1768static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
1769 struct mm_struct *mm)
1770{
1771 unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
1772
1773 if (*maxrss < hiwater_rss)
1774 *maxrss = hiwater_rss;
1775}
1776
1777#if defined(SPLIT_RSS_COUNTING)
1778void sync_mm_rss(struct mm_struct *mm);
1779#else
1780static inline void sync_mm_rss(struct mm_struct *mm)
1781{
1782}
1783#endif
1784
1785#ifndef __HAVE_ARCH_PTE_DEVMAP
1786static inline int pte_devmap(pte_t pte)
1787{
1788 return 0;
1789}
1790#endif
1791
1792int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
1793
1794extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1795 spinlock_t **ptl);
1796static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
1797 spinlock_t **ptl)
1798{
1799 pte_t *ptep;
1800 __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
1801 return ptep;
1802}
1803
1804#ifdef __PAGETABLE_P4D_FOLDED
1805static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
1806 unsigned long address)
1807{
1808 return 0;
1809}
1810#else
1811int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
1812#endif
1813
1814#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
1815static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
1816 unsigned long address)
1817{
1818 return 0;
1819}
1820static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
1821static inline void mm_dec_nr_puds(struct mm_struct *mm) {}
1822
1823#else
1824int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
1825
1826static inline void mm_inc_nr_puds(struct mm_struct *mm)
1827{
1828 if (mm_pud_folded(mm))
1829 return;
1830 atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
1831}
1832
1833static inline void mm_dec_nr_puds(struct mm_struct *mm)
1834{
1835 if (mm_pud_folded(mm))
1836 return;
1837 atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
1838}
1839#endif
1840
1841#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
1842static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
1843 unsigned long address)
1844{
1845 return 0;
1846}
1847
1848static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
1849static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
1850
1851#else
1852int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
1853
1854static inline void mm_inc_nr_pmds(struct mm_struct *mm)
1855{
1856 if (mm_pmd_folded(mm))
1857 return;
1858 atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
1859}
1860
1861static inline void mm_dec_nr_pmds(struct mm_struct *mm)
1862{
1863 if (mm_pmd_folded(mm))
1864 return;
1865 atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
1866}
1867#endif
1868
1869#ifdef CONFIG_MMU
1870static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
1871{
1872 atomic_long_set(&mm->pgtables_bytes, 0);
1873}
1874
1875static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
1876{
1877 return atomic_long_read(&mm->pgtables_bytes);
1878}
1879
1880static inline void mm_inc_nr_ptes(struct mm_struct *mm)
1881{
1882 atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
1883}
1884
1885static inline void mm_dec_nr_ptes(struct mm_struct *mm)
1886{
1887 atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
1888}
1889#else
1890
1891static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
1892static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
1893{
1894 return 0;
1895}
1896
1897static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
1898static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
1899#endif
1900
1901int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
1902int __pte_alloc_kernel(pmd_t *pmd);
1903
1904
1905
1906
1907
1908#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
1909
1910#ifndef __ARCH_HAS_5LEVEL_HACK
1911static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
1912 unsigned long address)
1913{
1914 return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
1915 NULL : p4d_offset(pgd, address);
1916}
1917
1918static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
1919 unsigned long address)
1920{
1921 return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
1922 NULL : pud_offset(p4d, address);
1923}
1924#endif
1925
1926static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
1927{
1928 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
1929 NULL: pmd_offset(pud, address);
1930}
1931#endif
1932
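/*
 * With USE_SPLIT_PTE_PTLOCKS each page-table page carries its own spinlock,
 * either embedded in struct page or (with ALLOC_SPLIT_PTLOCKS) allocated
 * separately and reached through page->ptl.  Otherwise all PTE
 * modifications for the mm serialize on mm->page_table_lock.
 */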
1933#if USE_SPLIT_PTE_PTLOCKS
1934#if ALLOC_SPLIT_PTLOCKS
1935void __init ptlock_cache_init(void);
1936extern bool ptlock_alloc(struct page *page);
1937extern void ptlock_free(struct page *page);
1938
1939static inline spinlock_t *ptlock_ptr(struct page *page)
1940{
1941 return page->ptl;
1942}
1943#else
1944static inline void ptlock_cache_init(void)
1945{
1946}
1947
1948static inline bool ptlock_alloc(struct page *page)
1949{
1950 return true;
1951}
1952
1953static inline void ptlock_free(struct page *page)
1954{
1955}
1956
1957static inline spinlock_t *ptlock_ptr(struct page *page)
1958{
1959 return &page->ptl;
1960}
1961#endif
1962
1963static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1964{
1965 return ptlock_ptr(pmd_page(*pmd));
1966}
1967
1968static inline bool ptlock_init(struct page *page)
1969{
1970
1971
1972
1973
1974
1975
1976
1977 VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
1978 if (!ptlock_alloc(page))
1979 return false;
1980 spin_lock_init(ptlock_ptr(page));
1981 return true;
1982}
1983
1984#else
1985
1986
1987
1988static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1989{
1990 return &mm->page_table_lock;
1991}
1992static inline void ptlock_cache_init(void) {}
1993static inline bool ptlock_init(struct page *page) { return true; }
1994static inline void ptlock_free(struct page *page) {}
1995#endif
1996
1997static inline void pgtable_init(void)
1998{
1999 ptlock_cache_init();
2000 pgtable_cache_init();
2001}
2002
2003static inline bool pgtable_page_ctor(struct page *page)
2004{
2005 if (!ptlock_init(page))
2006 return false;
2007 __SetPageTable(page);
2008 inc_zone_page_state(page, NR_PAGETABLE);
2009 return true;
2010}
2011
2012static inline void pgtable_page_dtor(struct page *page)
2013{
2014 ptlock_free(page);
2015 __ClearPageTable(page);
2016 dec_zone_page_state(page, NR_PAGETABLE);
2017}
2018
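/*
 * Typical locked-PTE access pattern (sketch):
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 *	... inspect or update the entry ...
 *	pte_unmap_unlock(pte, ptl);
 */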
2019#define pte_offset_map_lock(mm, pmd, address, ptlp) \
2020({ \
2021 spinlock_t *__ptl = pte_lockptr(mm, pmd); \
2022 pte_t *__pte = pte_offset_map(pmd, address); \
2023 *(ptlp) = __ptl; \
2024 spin_lock(__ptl); \
2025 __pte; \
2026})
2027
2028#define pte_unmap_unlock(pte, ptl) do { \
2029 spin_unlock(ptl); \
2030 pte_unmap(pte); \
2031} while (0)
2032
2033#define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
2034
2035#define pte_alloc_map(mm, pmd, address) \
2036 (pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))
2037
2038#define pte_alloc_map_lock(mm, pmd, address, ptlp) \
2039 (pte_alloc(mm, pmd) ? \
2040 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
2041
2042#define pte_alloc_kernel(pmd, address) \
2043 ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
2044 NULL: pte_offset_kernel(pmd, address))
2045
2046#if USE_SPLIT_PMD_PTLOCKS
2047
2048static struct page *pmd_to_page(pmd_t *pmd)
2049{
2050 unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
2051 return virt_to_page((void *)((unsigned long) pmd & mask));
2052}
2053
2054static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
2055{
2056 return ptlock_ptr(pmd_to_page(pmd));
2057}
2058
2059static inline bool pgtable_pmd_page_ctor(struct page *page)
2060{
2061#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2062 page->pmd_huge_pte = NULL;
2063#endif
2064 return ptlock_init(page);
2065}
2066
2067static inline void pgtable_pmd_page_dtor(struct page *page)
2068{
2069#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2070 VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
2071#endif
2072 ptlock_free(page);
2073}
2074
2075#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)
2076
2077#else
2078
2079static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
2080{
2081 return &mm->page_table_lock;
2082}
2083
2084static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
2085static inline void pgtable_pmd_page_dtor(struct page *page) {}
2086
2087#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
2088
2089#endif
2090
2091static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
2092{
2093 spinlock_t *ptl = pmd_lockptr(mm, pmd);
2094 spin_lock(ptl);
2095 return ptl;
2096}
2097
2098
2099
2100
2101
2102
2103
2104static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
2105{
2106 return &mm->page_table_lock;
2107}
2108
2109static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
2110{
2111 spinlock_t *ptl = pud_lockptr(mm, pud);
2112
2113 spin_lock(ptl);
2114 return ptl;
2115}
2116
2117extern void __init pagecache_init(void);
2118extern void free_area_init(unsigned long * zones_size);
2119extern void __init free_area_init_node(int nid, unsigned long * zones_size,
2120 unsigned long zone_start_pfn, unsigned long *zholes_size);
2121extern void free_initmem(void);
2122
2123
2124
2125
2126
2127
2128
2129extern unsigned long free_reserved_area(void *start, void *end,
2130 int poison, const char *s);
2131
2132#ifdef CONFIG_HIGHMEM
2133
2134
2135
2136
2137extern void free_highmem_page(struct page *page);
2138#endif
2139
2140extern void adjust_managed_page_count(struct page *page, long count);
2141extern void mem_init_print_info(const char *str);
2142
2143extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
2144
2145
2146static inline void __free_reserved_page(struct page *page)
2147{
2148 ClearPageReserved(page);
2149 init_page_count(page);
2150 __free_page(page);
2151}
2152
2153static inline void free_reserved_page(struct page *page)
2154{
2155 __free_reserved_page(page);
2156 adjust_managed_page_count(page, 1);
2157}
2158
2159static inline void mark_page_reserved(struct page *page)
2160{
2161 SetPageReserved(page);
2162 adjust_managed_page_count(page, -1);
2163}
2164
2165
2166
2167
2168
2169
2170
2171static inline unsigned long free_initmem_default(int poison)
2172{
2173 extern char __init_begin[], __init_end[];
2174
2175 return free_reserved_area(&__init_begin, &__init_end,
2176 poison, "unused kernel");
2177}
2178
2179static inline unsigned long get_num_physpages(void)
2180{
2181 int nid;
2182 unsigned long phys_pages = 0;
2183
2184 for_each_online_node(nid)
2185 phys_pages += node_present_pages(nid);
2186
2187 return phys_pages;
2188}
2189
2190#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
2216
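/*
 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP, architectures register the extents of
 * physical memory per node with memblock, and the functions below use that
 * node map to size zones (free_area_init_nodes()), compute absent pages and
 * derive per-node PFN ranges.
 */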
2217extern void free_area_init_nodes(unsigned long *max_zone_pfn);
2218unsigned long node_map_pfn_alignment(void);
2219unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
2220 unsigned long end_pfn);
2221extern unsigned long absent_pages_in_range(unsigned long start_pfn,
2222 unsigned long end_pfn);
2223extern void get_pfn_range_for_nid(unsigned int nid,
2224 unsigned long *start_pfn, unsigned long *end_pfn);
2225extern unsigned long find_min_pfn_with_active_regions(void);
2226extern void free_bootmem_with_active_regions(int nid,
2227 unsigned long max_low_pfn);
2228extern void sparse_memory_present_with_active_regions(int nid);
2229
2230#endif
2231
2232#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
2233 !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
2234static inline int __early_pfn_to_nid(unsigned long pfn,
2235 struct mminit_pfnnid_cache *state)
2236{
2237 return 0;
2238}
2239#else
2240
2241extern int __meminit early_pfn_to_nid(unsigned long pfn);
2242
2243extern int __meminit __early_pfn_to_nid(unsigned long pfn,
2244 struct mminit_pfnnid_cache *state);
2245#endif
2246
2247#if !defined(CONFIG_FLAT_NODE_MEM_MAP)
2248void zero_resv_unavail(void);
2249#else
2250static inline void zero_resv_unavail(void) {}
2251#endif
2252
2253extern void set_dma_reserve(unsigned long new_dma_reserve);
2254extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
2255 enum memmap_context, struct vmem_altmap *);
2256extern void setup_per_zone_wmarks(void);
2257extern int __meminit init_per_zone_wmark_min(void);
2258extern void mem_init(void);
2259extern void __init mmap_init(void);
2260extern void show_mem(unsigned int flags, nodemask_t *nodemask);
2261extern long si_mem_available(void);
2262extern void si_meminfo(struct sysinfo * val);
2263extern void si_meminfo_node(struct sysinfo *val, int nid);
2264#ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
2265extern unsigned long arch_reserved_kernel_pages(void);
2266#endif
2267
2268extern __printf(3, 4)
2269void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
2270
2271extern void setup_per_cpu_pageset(void);
2272
2273extern void zone_pcp_update(struct zone *zone);
2274extern void zone_pcp_reset(struct zone *zone);
2275
2276
2277extern int min_free_kbytes;
2278extern int watermark_boost_factor;
2279extern int watermark_scale_factor;
2280
2281
2282extern atomic_long_t mmap_pages_allocated;
2283extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
2284
2285
2286void vma_interval_tree_insert(struct vm_area_struct *node,
2287 struct rb_root_cached *root);
2288void vma_interval_tree_insert_after(struct vm_area_struct *node,
2289 struct vm_area_struct *prev,
2290 struct rb_root_cached *root);
2291void vma_interval_tree_remove(struct vm_area_struct *node,
2292 struct rb_root_cached *root);
2293struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
2294 unsigned long start, unsigned long last);
2295struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
2296 unsigned long start, unsigned long last);
2297
2298#define vma_interval_tree_foreach(vma, root, start, last) \
2299 for (vma = vma_interval_tree_iter_first(root, start, last); \
2300 vma; vma = vma_interval_tree_iter_next(vma, start, last))
2301
2302void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
2303 struct rb_root_cached *root);
2304void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
2305 struct rb_root_cached *root);
2306struct anon_vma_chain *
2307anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
2308 unsigned long start, unsigned long last);
2309struct anon_vma_chain *anon_vma_interval_tree_iter_next(
2310 struct anon_vma_chain *node, unsigned long start, unsigned long last);
2311#ifdef CONFIG_DEBUG_VM_RB
2312void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
2313#endif
2314
2315#define anon_vma_interval_tree_foreach(avc, root, start, last) \
2316 for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
2317 avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
2318
2319
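/*
 * Check against the overcommit policy whether @pages more pages may be
 * accounted to @mm; returns 0 on success, -ENOMEM if the request must be
 * refused.
 */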
2320extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
2321extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
2322 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
2323 struct vm_area_struct *expand);
2324static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
2325 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
2326{
2327 return __vma_adjust(vma, start, end, pgoff, insert, NULL);
2328}
2329extern struct vm_area_struct *vma_merge(struct mm_struct *,
2330 struct vm_area_struct *prev, unsigned long addr, unsigned long end,
2331 unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
2332 struct mempolicy *, struct vm_userfaultfd_ctx);
2333extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
2334extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
2335 unsigned long addr, int new_below);
2336extern int split_vma(struct mm_struct *, struct vm_area_struct *,
2337 unsigned long addr, int new_below);
2338extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
2339extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
2340 struct rb_node **, struct rb_node *);
2341extern void unlink_file_vma(struct vm_area_struct *);
2342extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
2343 unsigned long addr, unsigned long len, pgoff_t pgoff,
2344 bool *need_rmap_locks);
2345extern void exit_mmap(struct mm_struct *);
2346
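/*
 * Validate a new brk/data segment value against RLIMIT_DATA: the proposed
 * [start, new) range plus the existing [start_data, end_data) range must not
 * exceed @rlim.  Returns -ENOSPC when the limit would be exceeded.
 */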
2347static inline int check_data_rlimit(unsigned long rlim,
2348 unsigned long new,
2349 unsigned long start,
2350 unsigned long end_data,
2351 unsigned long start_data)
2352{
2353 if (rlim < RLIM_INFINITY) {
2354 if (((new - start) + (end_data - start_data)) > rlim)
2355 return -ENOSPC;
2356 }
2357
2358 return 0;
2359}
2360
2361extern int mm_take_all_locks(struct mm_struct *mm);
2362extern void mm_drop_all_locks(struct mm_struct *mm);
2363
2364extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
2365extern struct file *get_mm_exe_file(struct mm_struct *mm);
2366extern struct file *get_task_exe_file(struct task_struct *task);
2367
2368extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
2369extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
2370
2371extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
2372 const struct vm_special_mapping *sm);
2373extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
2374 unsigned long addr, unsigned long len,
2375 unsigned long flags,
2376 const struct vm_special_mapping *spec);
2377
2378extern int install_special_mapping(struct mm_struct *mm,
2379 unsigned long addr, unsigned long len,
2380 unsigned long flags, struct page **pages);
2381
2382extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
2383
2384extern unsigned long mmap_region(struct file *file, unsigned long addr,
2385 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2386 struct list_head *uf);
2387extern unsigned long do_mmap(struct file *file, unsigned long addr,
2388 unsigned long len, unsigned long prot, unsigned long flags,
2389 vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
2390 struct list_head *uf);
2391extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
2392 struct list_head *uf, bool downgrade);
2393extern int do_munmap(struct mm_struct *, unsigned long, size_t,
2394 struct list_head *uf);
2395
2396static inline unsigned long
2397do_mmap_pgoff(struct file *file, unsigned long addr,
2398 unsigned long len, unsigned long prot, unsigned long flags,
2399 unsigned long pgoff, unsigned long *populate,
2400 struct list_head *uf)
2401{
2402 return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf);
2403}
2404
2405#ifdef CONFIG_MMU
2406extern int __mm_populate(unsigned long addr, unsigned long len,
2407 int ignore_errors);
2408static inline void mm_populate(unsigned long addr, unsigned long len)
2409{
2410
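	/* Ignore errors: population is best effort */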
2411 (void) __mm_populate(addr, len, 1);
2412}
2413#else
2414static inline void mm_populate(unsigned long addr, unsigned long len) {}
2415#endif
2416
2417
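/* These take the mmap_sem themselves */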
2418extern int __must_check vm_brk(unsigned long, unsigned long);
2419extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
2420extern int vm_munmap(unsigned long, size_t);
2421extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
2422 unsigned long, unsigned long,
2423 unsigned long, unsigned long);
2424
2425struct vm_unmapped_area_info {
2426#define VM_UNMAPPED_AREA_TOPDOWN 1
2427 unsigned long flags;
2428 unsigned long length;
2429 unsigned long low_limit;
2430 unsigned long high_limit;
2431 unsigned long align_mask;
2432 unsigned long align_offset;
2433};
2434
2435extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
2436extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
2437
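/*
 * Search for an unmapped address range.
 *
 * We are looking for a range that:
 * - does not intersect with any VMA;
 * - is contained within the [low_limit, high_limit) interval;
 * - is at least the desired size;
 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask).
 */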
2447static inline unsigned long
2448vm_unmapped_area(struct vm_unmapped_area_info *info)
2449{
2450 if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
2451 return unmapped_area_topdown(info);
2452 else
2453 return unmapped_area(info);
2454}
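/*
 * Typical use, sketched after the generic bottom-up arch_get_unmapped_area()
 * in mm/mmap.c (the exact limits and alignment are per-arch decisions):
 *
 *	struct vm_unmapped_area_info info = {
 *		.flags		= 0,
 *		.length		= len,
 *		.low_limit	= mm->mmap_base,
 *		.high_limit	= TASK_SIZE,
 *		.align_mask	= 0,
 *		.align_offset	= 0,
 *	};
 *
 *	addr = vm_unmapped_area(&info);
 */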
2455
2456
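/* Page cache truncation, implemented in mm/truncate.c */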
2457extern void truncate_inode_pages(struct address_space *, loff_t);
2458extern void truncate_inode_pages_range(struct address_space *,
2459 loff_t lstart, loff_t lend);
2460extern void truncate_inode_pages_final(struct address_space *);
2461
2462
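/* generic vm_area_ops exported for stackable file systems */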
2463extern vm_fault_t filemap_fault(struct vm_fault *vmf);
2464extern void filemap_map_pages(struct vm_fault *vmf,
2465 pgoff_t start_pgoff, pgoff_t end_pgoff);
2466extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
2467
2468
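/* Writeback helpers from mm/page-writeback.c */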
2469int __must_check write_one_page(struct page *page);
2470void task_dirty_inc(struct task_struct *tsk);
2471
2472
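/* Default readahead window: 128K worth of pages */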
2473#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE)
2474
2475int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
2476 pgoff_t offset, unsigned long nr_to_read);
2477
2478void page_cache_sync_readahead(struct address_space *mapping,
2479 struct file_ra_state *ra,
2480 struct file *filp,
2481 pgoff_t offset,
2482 unsigned long size);
2483
2484void page_cache_async_readahead(struct address_space *mapping,
2485 struct file_ra_state *ra,
2486 struct file *filp,
2487 struct page *pg,
2488 pgoff_t offset,
2489 unsigned long size);
2490
2491extern unsigned long stack_guard_gap;
2492
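/* Grow a stack VMA to cover @address, in whichever direction the stack grows */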
2493extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
2494
2495
2496extern int expand_downwards(struct vm_area_struct *vma,
2497 unsigned long address);
2498#if VM_GROWSUP
2499extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
2500#else
2501 #define expand_upwards(vma, address) (0)
2502#endif
2503
2504
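/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */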
2505extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
2506extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
2507 struct vm_area_struct **pprev);
2508
2509
2510
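/*
 * Look up the first VMA which intersects the interval [start_addr, end_addr),
 * NULL if none.  Assumes start_addr < end_addr.
 */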
2511static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
2512{
2513 struct vm_area_struct * vma = find_vma(mm,start_addr);
2514
2515 if (vma && end_addr <= vma->vm_start)
2516 vma = NULL;
2517 return vma;
2518}
2519
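/*
 * VMA extents including the stack guard gap: a GROWSDOWN start is pulled
 * down and a GROWSUP end is pushed up by stack_guard_gap, clamped so that
 * address-space wraparound cannot occur.
 */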
2520static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
2521{
2522 unsigned long vm_start = vma->vm_start;
2523
2524 if (vma->vm_flags & VM_GROWSDOWN) {
2525 vm_start -= stack_guard_gap;
2526 if (vm_start > vma->vm_start)
2527 vm_start = 0;
2528 }
2529 return vm_start;
2530}
2531
2532static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
2533{
2534 unsigned long vm_end = vma->vm_end;
2535
2536 if (vma->vm_flags & VM_GROWSUP) {
2537 vm_end += stack_guard_gap;
2538 if (vm_end < vma->vm_end)
2539 vm_end = -PAGE_SIZE;
2540 }
2541 return vm_end;
2542}
2543
2544static inline unsigned long vma_pages(struct vm_area_struct *vma)
2545{
2546 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
2547}
2548
2549
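/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */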
2550static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
2551 unsigned long vm_start, unsigned long vm_end)
2552{
2553 struct vm_area_struct *vma = find_vma(mm, vm_start);
2554
2555 if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
2556 vma = NULL;
2557
2558 return vma;
2559}
2560
2561static inline bool range_in_vma(struct vm_area_struct *vma,
2562 unsigned long start, unsigned long end)
2563{
2564 return (vma && vma->vm_start <= start && end <= vma->vm_end);
2565}
2566
2567#ifdef CONFIG_MMU
2568pgprot_t vm_get_page_prot(unsigned long vm_flags);
2569void vma_set_page_prot(struct vm_area_struct *vma);
2570#else
2571static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
2572{
2573 return __pgprot(0);
2574}
2575static inline void vma_set_page_prot(struct vm_area_struct *vma)
2576{
2577 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2578}
2579#endif
2580
2581#ifdef CONFIG_NUMA_BALANCING
2582unsigned long change_prot_numa(struct vm_area_struct *vma,
2583 unsigned long start, unsigned long end);
2584#endif
2585
2586struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
2587int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
2588 unsigned long pfn, unsigned long size, pgprot_t);
2589int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
2590int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2591 unsigned long num);
2592int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
2593 unsigned long num);
2594vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2595 unsigned long pfn);
2596vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2597 unsigned long pfn, pgprot_t pgprot);
2598vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2599 pfn_t pfn);
2600vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2601 unsigned long addr, pfn_t pfn);
2602int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
2603
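/*
 * Fault-handler wrapper around vm_insert_page(): translate its errno return
 * into a VM_FAULT_* code that can be returned straight from ->fault().
 */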
2604static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
2605 unsigned long addr, struct page *page)
2606{
2607 int err = vm_insert_page(vma, addr, page);
2608
2609 if (err == -ENOMEM)
2610 return VM_FAULT_OOM;
2611 if (err < 0 && err != -EBUSY)
2612 return VM_FAULT_SIGBUS;
2613
2614 return VM_FAULT_NOPAGE;
2615}
2616
2617static inline vm_fault_t vmf_error(int err)
2618{
2619 if (err == -ENOMEM)
2620 return VM_FAULT_OOM;
2621 return VM_FAULT_SIGBUS;
2622}
2623
2624struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
2625 unsigned int foll_flags);
2626
#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_TOUCH	0x02	/* mark page accessed */
#define FOLL_GET	0x04	/* do get_page on page */
#define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
#define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
#define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
				 * and return without waiting upon it */

#define FOLL_POPULATE	0x40	/* fault in page */
#define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
#define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
#define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
#define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
#define FOLL_TRIED	0x800	/* a retry, previous pass started an IO */
#define FOLL_MLOCK	0x1000	/* lock present pages */
#define FOLL_REMOTE	0x2000	/* we are working on non-current tsk/mm */
#define FOLL_COW	0x4000	/* internal GUP flag */
#define FOLL_ANON	0x8000	/* don't do file mappings */
#define FOLL_LONGTERM	0x10000	/* mapping lifetime is indefinite: see below */
2645
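/*
 * NOTE on FOLL_LONGTERM:
 *
 * FOLL_LONGTERM indicates that the page will be held for an indefinite time
 * (e.g. as an RDMA or other DMA target).  Long-term pins must not end up in
 * CMA or filesystem-DAX regions, where a pinned page would block memory
 * hot-unplug or filesystem operations, so the get_user_pages*() paths that
 * accept this flag migrate away from or refuse such pages.
 */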
2673static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
2674{
2675 if (vm_fault & VM_FAULT_OOM)
2676 return -ENOMEM;
2677 if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
2678 return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
2679 if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
2680 return -EFAULT;
2681 return 0;
2682}
2683
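/*
 * apply_to_page_range(): allocate any missing page tables in the range and
 * invoke @fn on every pte within it, passing @data through.
 */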
2684typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
2685 void *data);
2686extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
2687 unsigned long size, pte_fn_t fn, void *data);
2688
2689
2690#ifdef CONFIG_PAGE_POISONING
2691extern bool page_poisoning_enabled(void);
2692extern void kernel_poison_pages(struct page *page, int numpages, int enable);
2693#else
2694static inline bool page_poisoning_enabled(void) { return false; }
2695static inline void kernel_poison_pages(struct page *page, int numpages,
2696 int enable) { }
2697#endif
2698
2699extern bool _debug_pagealloc_enabled;
2700
2701static inline bool debug_pagealloc_enabled(void)
2702{
2703 return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) && _debug_pagealloc_enabled;
2704}
2705
2706#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
2707extern void __kernel_map_pages(struct page *page, int numpages, int enable);
2708
2709static inline void
2710kernel_map_pages(struct page *page, int numpages, int enable)
2711{
2712 __kernel_map_pages(page, numpages, enable);
2713}
2714#ifdef CONFIG_HIBERNATION
2715extern bool kernel_page_present(struct page *page);
2716#endif
2717#else
2718static inline void
2719kernel_map_pages(struct page *page, int numpages, int enable) {}
2720#ifdef CONFIG_HIBERNATION
2721static inline bool kernel_page_present(struct page *page) { return true; }
2722#endif
2723#endif
2724
2725#ifdef __HAVE_ARCH_GATE_AREA
2726extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
2727extern int in_gate_area_no_mm(unsigned long addr);
2728extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
2729#else
2730static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
2731{
2732 return NULL;
2733}
2734static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
2735static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
2736{
2737 return 0;
2738}
2739#endif
2740
2741extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
2742
2743#ifdef CONFIG_SYSCTL
2744extern int sysctl_drop_caches;
2745int drop_caches_sysctl_handler(struct ctl_table *, int,
2746 void __user *, size_t *, loff_t *);
2747#endif
2748
2749void drop_slab(void);
2750void drop_slab_node(int nid);
2751
2752#ifndef CONFIG_MMU
2753#define randomize_va_space 0
2754#else
2755extern int randomize_va_space;
2756#endif
2757
2758const char * arch_vma_name(struct vm_area_struct *vma);
2759void print_vma_addr(char *prefix, unsigned long rip);
2760
2761void *sparse_buffer_alloc(unsigned long size);
2762struct page *sparse_mem_map_populate(unsigned long pnum, int nid,
2763 struct vmem_altmap *altmap);
2764pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
2765p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
2766pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
2767pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
2768pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
2769void *vmemmap_alloc_block(unsigned long size, int node);
2770struct vmem_altmap;
2771void *vmemmap_alloc_block_buf(unsigned long size, int node);
2772void *altmap_alloc_block_buf(unsigned long size, struct vmem_altmap *altmap);
2773void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
2774int vmemmap_populate_basepages(unsigned long start, unsigned long end,
2775 int node);
2776int vmemmap_populate(unsigned long start, unsigned long end, int node,
2777 struct vmem_altmap *altmap);
2778void vmemmap_populate_print_last(void);
2779#ifdef CONFIG_MEMORY_HOTPLUG
2780void vmemmap_free(unsigned long start, unsigned long end,
2781 struct vmem_altmap *altmap);
2782#endif
2783void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
2784 unsigned long nr_pages);
2785
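/* Hardware memory-error (hwpoison) handling, implemented in mm/memory-failure.c */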
2786enum mf_flags {
2787 MF_COUNT_INCREASED = 1 << 0,
2788 MF_ACTION_REQUIRED = 1 << 1,
2789 MF_MUST_KILL = 1 << 2,
2790 MF_SOFT_OFFLINE = 1 << 3,
2791};
2792extern int memory_failure(unsigned long pfn, int flags);
2793extern void memory_failure_queue(unsigned long pfn, int flags);
2794extern int unpoison_memory(unsigned long pfn);
2795extern int get_hwpoison_page(struct page *page);
2796#define put_hwpoison_page(page) put_page(page)
2797extern int sysctl_memory_failure_early_kill;
2798extern int sysctl_memory_failure_recovery;
2799extern void shake_page(struct page *p, int access);
2800extern atomic_long_t num_poisoned_pages __read_mostly;
2801extern int soft_offline_page(struct page *page, int flags);
2802
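/*
 * Error handlers for various types of pages: mf_result reports how a
 * poisoned page was handled, mf_action_page_type names the kind of page
 * involved.
 */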
2807enum mf_result {
2808 MF_IGNORED,
2809 MF_FAILED,
2810 MF_DELAYED,
2811 MF_RECOVERED,
2812};
2813
2814enum mf_action_page_type {
2815 MF_MSG_KERNEL,
2816 MF_MSG_KERNEL_HIGH_ORDER,
2817 MF_MSG_SLAB,
2818 MF_MSG_DIFFERENT_COMPOUND,
2819 MF_MSG_POISONED_HUGE,
2820 MF_MSG_HUGE,
2821 MF_MSG_FREE_HUGE,
2822 MF_MSG_NON_PMD_HUGE,
2823 MF_MSG_UNMAP_FAILED,
2824 MF_MSG_DIRTY_SWAPCACHE,
2825 MF_MSG_CLEAN_SWAPCACHE,
2826 MF_MSG_DIRTY_MLOCKED_LRU,
2827 MF_MSG_CLEAN_MLOCKED_LRU,
2828 MF_MSG_DIRTY_UNEVICTABLE_LRU,
2829 MF_MSG_CLEAN_UNEVICTABLE_LRU,
2830 MF_MSG_DIRTY_LRU,
2831 MF_MSG_CLEAN_LRU,
2832 MF_MSG_TRUNCATED_LRU,
2833 MF_MSG_BUDDY,
2834 MF_MSG_BUDDY_2ND,
2835 MF_MSG_DAX,
2836 MF_MSG_UNKNOWN,
2837};
2838
2839#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
2840extern void clear_huge_page(struct page *page,
2841 unsigned long addr_hint,
2842 unsigned int pages_per_huge_page);
2843extern void copy_user_huge_page(struct page *dst, struct page *src,
2844 unsigned long addr_hint,
2845 struct vm_area_struct *vma,
2846 unsigned int pages_per_huge_page);
2847extern long copy_huge_page_from_user(struct page *dst_page,
2848 const void __user *usr_src,
2849 unsigned int pages_per_huge_page,
2850 bool allow_pagefault);
2851#endif
2852
2853extern struct page_ext_operations debug_guardpage_ops;
2854
2855#ifdef CONFIG_DEBUG_PAGEALLOC
2856extern unsigned int _debug_guardpage_minorder;
2857extern bool _debug_guardpage_enabled;
2858
2859static inline unsigned int debug_guardpage_minorder(void)
2860{
2861 return _debug_guardpage_minorder;
2862}
2863
2864static inline bool debug_guardpage_enabled(void)
2865{
2866 return _debug_guardpage_enabled;
2867}
2868
2869static inline bool page_is_guard(struct page *page)
2870{
2871 struct page_ext *page_ext;
2872
2873 if (!debug_guardpage_enabled())
2874 return false;
2875
2876 page_ext = lookup_page_ext(page);
2877 if (unlikely(!page_ext))
2878 return false;
2879
2880 return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
2881}
2882#else
2883static inline unsigned int debug_guardpage_minorder(void) { return 0; }
2884static inline bool debug_guardpage_enabled(void) { return false; }
2885static inline bool page_is_guard(struct page *page) { return false; }
2886#endif
2887
2888#if MAX_NUMNODES > 1
2889void __init setup_nr_node_ids(void);
2890#else
2891static inline void setup_nr_node_ids(void) {}
2892#endif
2893
2894#endif
2895#endif
2896