#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
#include <linux/page_ref.h>
#include <linux/memremap.h>
#include <linux/overflow.h>
#include <linux/sizes.h>

#include <linux/rh_kabi.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;
struct bdi_writeback;

void init_mm_internals(void);

#ifndef CONFIG_NEED_MULTIPLE_NODES	/* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;

static inline void set_max_mapnr(unsigned long limit)
{
	max_mapnr = limit;
}
#else
static inline void set_max_mapnr(unsigned long limit) { }
#endif

extern unsigned long totalram_pages;
extern void * high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
extern const int mmap_rnd_bits_min;
extern const int mmap_rnd_bits_max;
extern int mmap_rnd_bits __read_mostly;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
extern const int mmap_rnd_compat_bits_min;
extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __read_mostly;
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#ifndef __pa_symbol
#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

#ifndef page_to_virt
#define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
#endif

#ifndef lm_alias
#define lm_alias(x)	__va(__pa_symbol(x))
#endif

/*
 * Architectures that must not establish a zero-page mapping on a read
 * fault can override this in <asm/pgtable.h>.
 */
#ifndef mm_forbids_zeropage
#define mm_forbids_zeropage(X)	(0)
#endif

/*
 * Architectures with a cheaper way of zeroing a struct page can
 * override this in <asm/pgtable.h>.
 */
#ifndef mm_zero_struct_page
#define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
#endif

/*
 * Default maximum number of map areas per mm.  ELF coredumps emit one
 * section per vma and store the section count in an unsigned short, so
 * keep a margin below USHRT_MAX for the extra sections a coredump needs.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

extern int sysctl_max_map_count;

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;

extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
				    size_t *, loff_t *);
extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
				    size_t *, loff_t *);

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
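
/*
 * Example (illustrative sketch, not part of this header): rounding a buffer
 * length up to whole pages and stepping through them with nth_page();
 * "buf" and "len" are hypothetical.
 *
 *	struct page *first = virt_to_page(buf);
 *	unsigned long i, npages = PAGE_ALIGN(len) >> PAGE_SHIFT;
 *
 *	for (i = 0; i < npages; i++)
 *		clear_highpage(nth_page(first, i));
 */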

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

struct vm_area_struct *vm_area_alloc(struct mm_struct *);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
void vm_area_free(struct vm_area_struct *);

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 * When changing, update also include/trace/events/mmflags.h
 */
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
#define VM_UFFD_WP	0x00001000	/* wrprotect pages tracking */

#define VM_LOCKED	0x00002000
#define VM_IO           0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_LOCKONFAULT	0x00080000	/* Lock the pages covered when they are faulted in */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_SYNC		0x00800000	/* Synchronous page faults */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_WIPEONFORK	0x02000000	/* Wipe VMA contents in child. */
#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
#else
# define VM_SOFTDIRTY	0
#endif

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
#define VM_HIGH_ARCH_BIT_0	32	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_1	33	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_2	34	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3	35	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_4	36	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
#define VM_HIGH_ARCH_4	BIT(VM_HIGH_ARCH_BIT_4)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */

#ifdef CONFIG_ARCH_HAS_PKEYS
# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0	VM_HIGH_ARCH_0
# define VM_PKEY_BIT1	VM_HIGH_ARCH_1
# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
#ifdef CONFIG_PPC
# define VM_PKEY_BIT4	VM_HIGH_ARCH_4
#else
# define VM_PKEY_BIT4	0
#endif
#endif /* CONFIG_ARCH_HAS_PKEYS */

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_SPARC64)
# define VM_SPARC_ADI	VM_ARCH_1	/* Uses ADI tag for access control */
# define VM_ARCH_CLEAR	VM_SPARC_ADI
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif

#if defined(CONFIG_X86_INTEL_MPX)
/* MPX specific bounds table or bounds directory */
# define VM_MPX		VM_HIGH_ARCH_4
#else
# define VM_MPX		VM_NONE
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK	VM_GROWSUP
#else
#define VM_STACK	VM_GROWSDOWN
#endif

#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)

/*
 * Special vmas that are non-mergable, non-mlock()able.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

/* This mask defines which mm->def_flags a process can inherit its parent */
#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE

/* This mask is used to clear all the VMA flags used by mlock */
#define VM_LOCKED_CLEAR_MASK	(~(VM_LOCKED | VM_LOCKONFAULT))

/* Arch-specific flags to clear when updating VM flags on protection change */
#ifndef VM_ARCH_CLEAR
# define VM_ARCH_CLEAR	VM_NONE
#endif
#define VM_FLAGS_CLEAR	(ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];

#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
#define FAULT_FLAG_MKWRITE	0x02	/* Fault was mkwrite of existing pte */
#define FAULT_FLAG_ALLOW_RETRY	0x04	/* Retry fault if blocking */
#define FAULT_FLAG_RETRY_NOWAIT	0x08	/* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE	0x10	/* The fault task is in SIGKILL killable region */
#define FAULT_FLAG_TRIED	0x20	/* Second try */
#define FAULT_FLAG_USER		0x40	/* The fault originated in userspace */
#define FAULT_FLAG_REMOTE	0x80	/* faulting for non current tsk/mm */
#define FAULT_FLAG_INSTRUCTION	0x100	/* The fault was during an instruction fetch */

#define FAULT_FLAG_TRACE \
	{ FAULT_FLAG_WRITE,		"WRITE" }, \
	{ FAULT_FLAG_MKWRITE,		"MKWRITE" }, \
	{ FAULT_FLAG_ALLOW_RETRY,	"ALLOW_RETRY" }, \
	{ FAULT_FLAG_RETRY_NOWAIT,	"RETRY_NOWAIT" }, \
	{ FAULT_FLAG_KILLABLE,		"KILLABLE" }, \
	{ FAULT_FLAG_TRIED,		"TRIED" }, \
	{ FAULT_FLAG_USER,		"USER" }, \
	{ FAULT_FLAG_REMOTE,		"REMOTE" }, \
	{ FAULT_FLAG_INSTRUCTION,	"INSTRUCTION" }
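
/*
 * Example (illustrative sketch): FAULT_FLAG_TRACE is a flag-name table for
 * trace events, decoded with __print_flags(); the event field below is
 * hypothetical.
 *
 *	TP_printk("flags=%s",
 *		  __print_flags(__entry->fault_flags, "|", FAULT_FLAG_TRACE))
 */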

/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * MM layer fills up gfp_mask for page allocations but fault handler might
 * override it to alter the allocation behavior.
 */
struct vm_fault {
	struct vm_area_struct *vma;	/* Target VMA */
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	gfp_t gfp_mask;			/* gfp mask to be used for allocations */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	unsigned long address;		/* Faulting virtual address */
	pmd_t *pmd;			/* Pointer to pmd entry matching
					 * the 'address' */
	pud_t *pud;			/* Pointer to pud entry matching
					 * the 'address'
					 */
	pte_t orig_pte;			/* Value of PTE at the time of fault */

	struct page *cow_page;		/* Page handler may use for COW fault */
	struct mem_cgroup *memcg;	/* Cgroup cow_page belongs to */
	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
	pte_t *pte;			/* Pointer to pte entry matching
					 * the 'address'. NULL if the page
					 * table hasn't been allocated.
					 */
	spinlock_t *ptl;		/* Page table lock.
					 * Protects pte page table if 'pte'
					 * is not NULL, otherwise pmd.
					 */
	pgtable_t prealloc_pte;		/* Pre-allocated pte page table.
					 * do_fault_around() pre-allocates
					 * the page table to avoid allocating
					 * from atomic context.
					 */
};

/* page entry size for vm->huge_fault() */
enum page_entry_size {
	PE_SIZE_PTE = 0,
	PE_SIZE_PMD,
	PE_SIZE_PUD,
};

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*split)(struct vm_area_struct * area, unsigned long addr);
	int (*mremap)(struct vm_area_struct * area);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf,
			enum page_entry_size pe_size);
	void (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct * area);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs that can switch between memory and hardware
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
#endif
	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr.  This is useful if the default behavior
	 * (using pte_page()) would not find the correct page.
	 */
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
	RH_KABI_RESERVE(1)
	RH_KABI_RESERVE(2)
	RH_KABI_RESERVE(3)
	RH_KABI_RESERVE(4)
};

static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	static const struct vm_operations_struct dummy_vm_ops = {};

	vma->vm_mm = mm;
	vma->vm_ops = &dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
}

static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
	vma->vm_ops = NULL;
}
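
/*
 * Example (illustrative sketch): setting up a new anonymous VMA; "mm" is a
 * hypothetical mm_struct pointer and error handling is elided.
 *
 *	struct vm_area_struct *vma = vm_area_alloc(mm);
 *
 *	if (!vma)
 *		return -ENOMEM;
 *	vma_set_anonymous(vma);		(anonymous VMAs have no vm_ops)
 *	...
 *	vm_area_free(vma);
 */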

#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }

struct mmu_gather;
struct inode;

#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline int pmd_devmap(pmd_t pmd)
{
	return 0;
}
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

/*
 * FIXME: take this include out, include page-flags.h in
 * files which need it (119 of them)
 */
#include <linux/page-flags.h>
#include <linux/huge_mm.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
	return page_ref_dec_and_test(page);
}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 * This can be called when MMU is off so it must not access
 * any of the virtual mappings.
 */
static inline int get_page_unless_zero(struct page *page)
{
	return page_ref_add_unless(page, 1, 0);
}

extern int page_is_ram(unsigned long pfn);

enum {
	REGION_INTERSECTS,
	REGION_DISJOINT,
	REGION_MIXED,
};

int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
		      unsigned long desc);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
static inline bool is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return false;
#endif
}

#ifndef is_ioremap_addr
#define is_ioremap_addr(x) is_vmalloc_addr(x)
#endif

#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
static inline void *kvmalloc(size_t size, gfp_t flags)
{
	return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
	return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
static inline void *kvzalloc(size_t size, gfp_t flags)
{
	return kvmalloc(size, flags | __GFP_ZERO);
}

static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return kvmalloc(bytes, flags);
}

static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
{
	return kvmalloc_array(n, size, flags | __GFP_ZERO);
}

extern void kvfree(const void *addr);
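
/*
 * Example (illustrative sketch): allocating a possibly-large array that may
 * fall back from kmalloc to vmalloc, with overflow-checked sizing; "nr" and
 * "struct foo" are hypothetical.
 *
 *	struct foo *arr = kvmalloc_array(nr, sizeof(*arr), GFP_KERNEL);
 *
 *	if (!arr)
 *		return -ENOMEM;
 *	...
 *	kvfree(arr);
 */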

static inline atomic_t *compound_mapcount_ptr(struct page *page)
{
	return &page[1].compound_mapcount;
}

static inline int compound_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(!PageCompound(page), page);
	page = compound_head(page);
	return atomic_read(compound_mapcount_ptr(page)) + 1;
}

/*
 * The atomic page->_mapcount, starts from -1: so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */
static inline void page_mapcount_reset(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

int __page_mapcount(struct page *page);

static inline int page_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(PageSlab(page), page);

	if (unlikely(PageCompound(page)))
		return __page_mapcount(page);
	return atomic_read(&page->_mapcount) + 1;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int total_mapcount(struct page *page);
int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
#else
static inline int total_mapcount(struct page *page)
{
	return page_mapcount(page);
}
static inline int page_trans_huge_mapcount(struct page *page,
					   int *total_mapcount)
{
	int mapcount = page_mapcount(page);
	if (total_mapcount)
		*total_mapcount = mapcount;
	return mapcount;
}
#endif

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);

	return compound_head(page);
}

void __put_page(struct page *page);

void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);

/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a compound page.
 */
typedef void compound_page_dtor(struct page *);

enum compound_dtor_id {
	NULL_COMPOUND_DTOR,
	COMPOUND_PAGE_DTOR,
#ifdef CONFIG_HUGETLB_PAGE
	HUGETLB_PAGE_DTOR,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	TRANSHUGE_PAGE_DTOR,
#endif
	NR_COMPOUND_DTORS,
};
extern compound_page_dtor * const compound_page_dtors[];

static inline void set_compound_page_dtor(struct page *page,
		enum compound_dtor_id compound_dtor)
{
	VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
	page[1].compound_dtor = compound_dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
	return compound_page_dtors[page[1].compound_dtor];
}

static inline unsigned int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return page[1].compound_order;
}

static inline void set_compound_order(struct page *page, unsigned int order)
{
	page[1].compound_order = order;
}

void free_compound_page(struct page *page);
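
/*
 * Example (illustrative sketch): total byte size of a possibly-compound
 * page; "page" is hypothetical and may be a tail page.
 *
 *	struct page *head = compound_head(page);
 *	size_t size = PAGE_SIZE << compound_order(head);
 */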

#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case, do always want
 * pte_mkwrite.  That way, we never have broken page tables anywhere.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}

vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
		struct page *page);
vm_fault_t finish_fault(struct vm_fault *vmf);
vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#endif

/*
 * Multiple processes may "see" the same page.  E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * page->flags also packs the page's zone, node and (when present) section
 * and last-cpupid, from the top of the word down:
 *
 *	[SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS
 *
 * The zone field is never updated after free_area_init_core() sets it,
 * so it can be read without locking.
 */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_zone_device_page(const struct page *page)
{
	return page_zonenum(page) == ZONE_DEVICE;
}
extern void memmap_init_zone_device(struct zone *, unsigned long,
				    unsigned long, struct dev_pagemap *);
#else
static inline bool is_zone_device_page(const struct page *page)
{
	return false;
}
#endif

#ifdef CONFIG_DEV_PAGEMAP_OPS
void __put_devmap_managed_page(struct page *page);
DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
static inline bool put_devmap_managed_page(struct page *page)
{
	if (!static_branch_unlikely(&devmap_managed_key))
		return false;
	if (!is_zone_device_page(page))
		return false;
	switch (page->pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
	case MEMORY_DEVICE_FS_DAX:
		__put_devmap_managed_page(page);
		return true;
	default:
		break;
	}
	return false;
}

static inline bool is_device_private_page(const struct page *page)
{
	return is_zone_device_page(page) &&
		page->pgmap->type == MEMORY_DEVICE_PRIVATE;
}

#ifdef CONFIG_PCI_P2PDMA
static inline bool is_pci_p2pdma_page(const struct page *page)
{
	return is_zone_device_page(page) &&
		page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
}
#else /* CONFIG_PCI_P2PDMA */
static inline bool is_pci_p2pdma_page(const struct page *page)
{
	return false;
}
#endif /* CONFIG_PCI_P2PDMA */

#else /* CONFIG_DEV_PAGEMAP_OPS */
static inline bool put_devmap_managed_page(struct page *page)
{
	return false;
}

static inline bool is_device_private_page(const struct page *page)
{
	return false;
}

static inline bool is_pci_p2pdma_page(const struct page *page)
{
	return false;
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */

/* 127: arbitrary random number, small enough to assemble well */
#define page_ref_zero_or_close_to_overflow(page) \
	((unsigned int) page_ref_count(page) + 127u <= 127u)

static inline void get_page(struct page *page)
{
	page = compound_head(page);
	/*
	 * Getting a normal page or the head of a compound page
	 * requires to already have an elevated page->_refcount.
	 */
	VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
	page_ref_inc(page);
}

static inline __must_check bool try_get_page(struct page *page)
{
	page = compound_head(page);
	if (WARN_ON_ONCE(page_ref_count(page) <= 0))
		return false;
	page_ref_inc(page);
	return true;
}

static inline void put_page(struct page *page)
{
	page = compound_head(page);

	/*
	 * For devmap managed pages we need to catch refcount transition from
	 * 2 to 1, when refcount reach one it means the page is free and we
	 * need to inform the device driver through callback. See
	 * include/linux/memremap.h and HMM for details.
	 */
	if (put_devmap_managed_page(page))
		return;

	if (put_page_testzero(page))
		__put_page(page);
}
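
/*
 * Example (illustrative sketch): the get_page()/put_page() pairing that
 * keeps a page alive across a window where it is used outside any lock;
 * "page" is hypothetical.
 *
 *	get_page(page);
 *	... safe to touch page contents / page_address(page) here ...
 *	put_page(page);
 */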

/*
 * put_user_page() - release a page pinned by get_user_pages*()
 *
 * Pages that were pinned via get_user_pages*() should be released via
 * either put_user_page(), or one of the put_user_pages*() routines
 * below.  This is so that pinned pages can eventually be tracked and
 * handled separately from ordinary page references.
 */
static inline void put_user_page(struct page *page)
{
	put_page(page);
}

void put_user_pages_dirty(struct page **pages, unsigned long npages);
void put_user_pages_dirty_lock(struct page **pages, unsigned long npages);
void put_user_pages(struct page **pages, unsigned long npages);

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif

/*
 * The identification function is mainly used by the buddy allocator for
 * determining if two pages could be buddies. We are not really identifying
 * the zone since we could be using the section number id if we do not have
 * node id available in page flags.
 * We only guarantee that it will return the same value for two combinable
 * pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
	return zone->node;
#else
	return 0;
#endif
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	struct page *p = (struct page *)page;

	return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

static inline int cpupid_to_pid(int cpupid)
{
	return cpupid & LAST__PID_MASK;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}

static inline int cpupid_to_nid(int cpupid)
{
	return cpu_to_node(cpupid_to_cpu(cpupid));
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}

static inline bool cpupid_cpu_unset(int cpupid)
{
	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
}

static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
{
	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
}

#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
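
/*
 * Example (illustrative sketch): the cpupid packing round-trips the cpu and
 * pid fields defined above; the values are hypothetical.
 *
 *	int cpupid = cpu_pid_to_cpupid(3, 1234);
 *
 *	cpupid_to_cpu(cpupid) == (3 & LAST__CPU_MASK)
 *	cpupid_to_pid(cpupid) == (1234 & LAST__PID_MASK)
 */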
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}

static inline int page_cpupid_last(struct page *page)
{
	return page->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int page_cpupid_last(struct page *page)
{
	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}

extern int page_cpupid_xchg_last(struct page *page, int cpupid);

static inline void page_cpupid_reset_last(struct page *page)
{
	page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
#else /* !CONFIG_NUMA_BALANCING */
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return page_to_nid(page);
}

static inline int page_cpupid_last(struct page *page)
{
	return page_to_nid(page);
}

static inline int cpupid_to_nid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_pid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return -1;
}

static inline int cpu_pid_to_cpupid(int nid, int pid)
{
	return -1;
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return 1;
}

static inline void page_cpupid_reset_last(struct page *page)
{
}

static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

static inline pg_data_t *page_pgdat(const struct page *page)
{
	return NODE_DATA(page_to_nid(page));
}

#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#ifdef SECTION_IN_PAGE_FLAGS
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}
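
/*
 * Example (illustrative sketch): memmap initialisation uses set_page_links()
 * to stamp zone/node/section into page->flags; the values are hypothetical.
 *
 *	set_page_links(page, ZONE_NORMAL, nid, pfn);
 *
 * afterwards page_zonenum(page) == ZONE_NORMAL and page_to_nid(page) == nid.
 */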

#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return page->mem_cgroup;
}
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return READ_ONCE(page->mem_cgroup);
}
#else
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return NULL;
}
#endif

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(const struct page *page)
{
	return page_to_virt(page);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
static inline void *page_address(const struct page *page)
{
	return page->virtual;
}
static inline void set_page_address(struct page *page, void *address)
{
	page->virtual = address;
}
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

extern void *page_rmapping(struct page *page);
extern struct anon_vma *page_anon_vma(struct page *page);
extern struct address_space *page_mapping(struct page *page);

extern struct address_space *__page_file_mapping(struct page *);

static inline
struct address_space *page_file_mapping(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_mapping(page);

	return page->mapping;
}

extern pgoff_t __page_file_index(struct page *page);

/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use swp_offset(->private)
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_index(page);
	return page->index;
}

bool page_mapped(struct page *page);
struct address_space *page_mapping(struct page *page);
struct address_space *page_mapping_file(struct page *page);

/*
 * Return true only if the page has been allocated with
 * ALLOC_NO_WATERMARKS and the low watermark was not
 * met implying that the system is under some pressure.
 */
static inline bool page_is_pfmemalloc(struct page *page)
{
	/*
	 * Page index cannot be this large so this must be
	 * a pfmemalloc page.
	 */
	return page->index == -1UL;
}

/*
 * Only to be called by the page allocator on a freshly allocated
 * page.
 */
static inline void set_page_pfmemalloc(struct page *page)
{
	page->index = -1UL;
}

static inline void clear_page_pfmemalloc(struct page *page)
{
	page->index = 0;
}
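
/*
 * Example (illustrative sketch): network drivers propagate this bit into
 * the skb built around the page, so reserve-backed memory is only consumed
 * by traffic that serves reclaim; "page" and "skb" are hypothetical.
 *
 *	if (page_is_pfmemalloc(page))
 *		skb->pfmemalloc = true;
 */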

/*
 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
 */
extern void pagefault_out_of_memory(void);

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

/*
 * Flags passed to show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);

extern bool can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct address_space *check_mapping;	/* Check page->mapping if set */
	pgoff_t	first_index;			/* Lowest page->index to unmap */
	pgoff_t last_index;			/* Highest page->index to unmap */
};

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			     pte_t pte);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd);

void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		  unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
		    unsigned long size);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long start, unsigned long end);

/**
 * mm_walk - callbacks for walk_page_range
 * @pud_entry: if set, called for each non-empty PUD entry
 * @pmd_entry: if set, called for each non-empty PMD entry
 * @pte_entry: if set, called for each non-empty PTE entry
 * @pte_hole: if set, called for each hole at all levels
 * @hugetlb_entry: if set, called for each hugetlb entry
 * @test_walk: caller specific callback function to determine whether
 *             we walk over the current vma or not.  Returning 0 means
 *             "do page table walk over the current vma", returning 1
 *             means "skip the current vma", and a negative value means
 *             "abort the walk right now".
 * @mm:        mm_struct representing the target process of page table walk
 * @vma:       vma currently walked (NULL if walking outside vmas)
 * @private:   private data for callbacks' usage
 *
 * (see the comment on walk_page_range() for more details)
 */
struct mm_walk {
	int (*pud_entry)(pud_t *pud, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_entry)(pte_t *pte, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_hole)(unsigned long addr, unsigned long next,
			struct mm_walk *walk);
	int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
			     unsigned long addr, unsigned long next,
			     struct mm_walk *walk);
	int (*test_walk)(unsigned long addr, unsigned long next,
			 struct mm_walk *walk);
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	void *private;
};

int walk_page_range(unsigned long addr, unsigned long end,
		struct mm_walk *walk);
int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma);
int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
		   unsigned long *start, unsigned long *end,
		   pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write);

extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);

#ifdef CONFIG_MMU
extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long address, unsigned int fault_flags,
			    bool *unlocked);
void unmap_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t nr, bool even_cows);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
#else
static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
		unsigned long address, unsigned int flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct task_struct *tsk,
		struct mm_struct *mm, unsigned long address,
		unsigned int fault_flags, bool *unlocked)
{
	/* should never happen if there's no MMU */
	BUG();
	return -EFAULT;
}
static inline void unmap_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t nr, bool even_cows) { }
static inline void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows) { }
#endif

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
		void *buf, int len, unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, unsigned int gup_flags);
extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, unsigned int gup_flags);

long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long start, unsigned long nr_pages,
			    unsigned int gup_flags, struct page **pages,
			    struct vm_area_struct **vmas, int *locked);
long get_user_pages(unsigned long start, unsigned long nr_pages,
			    unsigned int gup_flags, struct page **pages,
			    struct vm_area_struct **vmas);
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages, int *locked);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
		    struct page **pages, unsigned int gup_flags);

int get_user_pages_fast(unsigned long start, int nr_pages,
			unsigned int gup_flags, struct page **pages);

/* Container for pinned pfns / pages */
struct frame_vector {
	unsigned int nr_allocated;	/* Number of frames we have space for */
	unsigned int nr_frames;	/* Number of frames stored in ptrs array */
	bool got_ref;		/* Did we pin pages by getting page ref? */
	bool is_pfns;		/* Does array contain pages or pfns? */
	void *ptrs[0];		/* Array of pinned pfns / pages. Use
				 * frame_vector_pages() or frame_vector_pfns()
				 * for access */
};

struct frame_vector *frame_vector_create(unsigned int nr_frames);
void frame_vector_destroy(struct frame_vector *vec);
int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
		     unsigned int gup_flags, struct frame_vector *vec);
void put_vaddr_frames(struct frame_vector *vec);
int frame_vector_to_pages(struct frame_vector *vec);
void frame_vector_to_pfns(struct frame_vector *vec);

static inline unsigned int frame_vector_count(struct frame_vector *vec)
{
	return vec->nr_frames;
}

static inline struct page **frame_vector_pages(struct frame_vector *vec)
{
	if (vec->is_pfns) {
		int err = frame_vector_to_pages(vec);

		if (err)
			return ERR_PTR(err);
	}
	return (struct page **)(vec->ptrs);
}

static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
{
	if (!vec->is_pfns)
		frame_vector_to_pfns(vec);
	return (unsigned long *)(vec->ptrs);
}
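
/*
 * Example (illustrative sketch): pinning a range of user pages through a
 * frame vector; "start" and "nr" are hypothetical and error handling is
 * abbreviated.
 *
 *	struct frame_vector *vec = frame_vector_create(nr);
 *
 *	if (!vec)
 *		return -ENOMEM;
 *	if (get_vaddr_frames(start, nr, FOLL_WRITE, vec) > 0) {
 *		struct page **pages = frame_vector_pages(vec);
 *		... use the pinned pages ...
 *		put_vaddr_frames(vec);
 *	}
 *	frame_vector_destroy(vec);
 */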

struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
			struct page **pages);
int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);

extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned int offset,
			      unsigned int length);

void __set_page_dirty(struct page *, struct address_space *, int warn);
int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_cleaned(struct page *page, struct address_space *mapping,
			  struct bdi_writeback *wb);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
void __cancel_dirty_page(struct page *page);
static inline void cancel_dirty_page(struct page *page)
{
	/* Avoid atomic ops, locking, etc. if not needed. */
	if (PageDirty(page))
		__cancel_dirty_page(page);
}
int clear_page_dirty_for_io(struct page *page);

int get_cmdline(struct task_struct *task, char *buffer, int buflen);

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

#ifdef CONFIG_SHMEM
/*
 * The vma_is_shmem is not inline because it is used only by slow
 * path functions, and it is just for in-kernel usage. Not for drivers.
 */
bool vma_is_shmem(struct vm_area_struct *vma);
#else
static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
#endif

int vma_is_stack_for_current(struct vm_area_struct *vma);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks);
extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end, pgprot_t newprot,
			      int dirty_accountable, int prot_numa);
extern int mprotect_fixup(struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

/*
 * doesn't attempt to fault and will return short.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages);

/*
 * per-process(per-mm_struct) statistics.
 */
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = atomic_long_read(&mm->rss_stat.count[member]);

#ifdef SPLIT_RSS_COUNTING
	/*
	 * counter is updated in asynchronous manner and may go to minus.
	 * But it's never be expected number for users.
	 */
	if (val < 0)
		val = 0;
#endif
	return (unsigned long)val;
}

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	atomic_long_add(value, &mm->rss_stat.count[member]);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_inc(&mm->rss_stat.count[member]);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_dec(&mm->rss_stat.count[member]);
}

/* Optimized variant when page is already known not to be PageAnon */
static inline int mm_counter_file(struct page *page)
{
	if (PageSwapBacked(page))
		return MM_SHMEMPAGES;
	return MM_FILEPAGES;
}

static inline int mm_counter(struct page *page)
{
	if (PageAnon(page))
		return MM_ANONPAGES;
	return mm_counter_file(page);
}

static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES) +
		get_mm_counter(mm, MM_SHMEMPAGES);
}
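
/*
 * Example (illustrative sketch): rmap code picks the right RSS bucket for a
 * page via mm_counter(); "mm" and "page" are hypothetical.
 *
 *	inc_mm_counter(mm, mm_counter(page));	when mapping the page
 *	dec_mm_counter(mm, mm_counter(page));	when unmapping it
 */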

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if ((mm)->hiwater_rss < _rss)
		(mm)->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
{
	mm->hiwater_rss = get_mm_rss(mm);
}

static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

	if (*maxrss < hiwater_rss)
		*maxrss = hiwater_rss;
}

#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct mm_struct *mm)
{
}
#endif

#ifndef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t pte)
{
	return 0;
}
#endif

int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			       spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
				    spinlock_t **ptl)
{
	pte_t *ptep;
	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
	return ptep;
}

#ifdef __PAGETABLE_P4D_FOLDED
static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
						unsigned long address)
{
	return 0;
}
#else
int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
						unsigned long address)
{
	return 0;
}
static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
static inline void mm_dec_nr_puds(struct mm_struct *mm) {}

#else
int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);

static inline void mm_inc_nr_puds(struct mm_struct *mm)
{
	if (mm_pud_folded(mm))
		return;
	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_puds(struct mm_struct *mm)
{
	if (mm_pud_folded(mm))
		return;
	atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
#endif

#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}

static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}

#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);

static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
	if (mm_pmd_folded(mm))
		return;
	atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
	if (mm_pmd_folded(mm))
		return;
	atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
#endif

#ifdef CONFIG_MMU
static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->pgtables_bytes, 0);
}

static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
	return atomic_long_read(&mm->pgtables_bytes);
}

static inline void mm_inc_nr_ptes(struct mm_struct *mm)
{
	atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_ptes(struct mm_struct *mm)
{
	atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}
#else

static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
	return 0;
}

static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
#endif

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);

/*
 * The following ifdef needed to get the 4level-fixup.h header to work.
 * Remove it when 4level-fixup.h has been removed.
 */
#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)

#ifndef __ARCH_HAS_5LEVEL_HACK
static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
		unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
		NULL : p4d_offset(pgd, address);
}

static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
		unsigned long address)
{
	return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
		NULL : pud_offset(p4d, address);
}
#endif /* !__ARCH_HAS_5LEVEL_HACK */

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
		NULL: pmd_offset(pud, address);
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */

#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
extern bool ptlock_alloc(struct page *page);
extern void ptlock_free(struct page *page);

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return page->ptl;
}
#else /* ALLOC_SPLIT_PTLOCKS */
static inline void ptlock_cache_init(void)
{
}

static inline bool ptlock_alloc(struct page *page)
{
	return true;
}

static inline void ptlock_free(struct page *page)
{
}

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return &page->ptl;
}
#endif /* ALLOC_SPLIT_PTLOCKS */

static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_page(*pmd));
}

static inline bool ptlock_init(struct page *page)
{
	/*
	 * prep_new_page() initialize page->private (and therefore page->ptl)
	 * with 0. Make sure nobody took it in use in between.
	 *
	 * It can happen if arch try to use slab for page table allocation:
	 * slab code uses page->slab_cache, which share storage with page->ptl.
	 */
	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
	if (!ptlock_alloc(page))
		return false;
	spin_lock_init(ptlock_ptr(page));
	return true;
}

/* Reset page->mapping so free_pages_check won't complain. */
static inline void pte_lock_deinit(struct page *page)
{
	page->mapping = NULL;
	ptlock_free(page);
}

#else	/* !USE_SPLIT_PTE_PTLOCKS */
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct page *page) { return true; }
static inline void pte_lock_deinit(struct page *page) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */

static inline void pgtable_init(void)
{
	ptlock_cache_init();
	pgtable_cache_init();
}

static inline bool pgtable_page_ctor(struct page *page)
{
	if (!ptlock_init(page))
		return false;
	__SetPageTable(page);
	inc_zone_page_state(page, NR_PAGETABLE);
	return true;
}

static inline void pgtable_page_dtor(struct page *page)
{
	pte_lock_deinit(page);
	__ClearPageTable(page);
	dec_zone_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)

#define pte_alloc(mm, pmd, address)			\
	(unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address))

#define pte_alloc_map(mm, pmd, address)			\
	(pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
	(pte_alloc(mm, pmd, address) ?			\
		 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)			\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
		NULL: pte_offset_kernel(pmd, address))
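
/*
 * Example (illustrative sketch): the canonical locked PTE access pattern;
 * "mm", "pmd" and "addr" are hypothetical and assumed valid.
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *
 *	if (pte_present(*pte))
 *		... inspect or update the entry under the lock ...
 *	pte_unmap_unlock(pte, ptl);
 */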
1944
1945#if USE_SPLIT_PMD_PTLOCKS
1946
1947static struct page *pmd_to_page(pmd_t *pmd)
1948{
1949 unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
1950 return virt_to_page((void *)((unsigned long) pmd & mask));
1951}
1952
1953static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1954{
1955 return ptlock_ptr(pmd_to_page(pmd));
1956}
1957
1958static inline bool pgtable_pmd_page_ctor(struct page *page)
1959{
1960#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1961 page->pmd_huge_pte = NULL;
1962#endif
1963 return ptlock_init(page);
1964}
1965
1966static inline void pgtable_pmd_page_dtor(struct page *page)
1967{
1968#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1969 VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
1970#endif
1971 ptlock_free(page);
1972}
1973
1974#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)
1975
1976#else
1977
1978static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1979{
1980 return &mm->page_table_lock;
1981}
1982
1983static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
1984static inline void pgtable_pmd_page_dtor(struct page *page) {}
1985
1986#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
1987
1988#endif
1989
1990static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
1991{
1992 spinlock_t *ptl = pmd_lockptr(mm, pmd);
1993 spin_lock(ptl);
1994 return ptl;
1995}
1996
1997
1998
1999
2000
2001
2002
2003static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
2004{
2005 return &mm->page_table_lock;
2006}
2007
2008static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
2009{
2010 spinlock_t *ptl = pud_lockptr(mm, pud);
2011
2012 spin_lock(ptl);
2013 return ptl;
2014}
2015
2016extern void __init pagecache_init(void);
2017extern void free_area_init(unsigned long * zones_size);
2018extern void free_area_init_node(int nid, unsigned long * zones_size,
2019 unsigned long zone_start_pfn, unsigned long *zholes_size);
2020extern void free_initmem(void);
2021
2022
2023
2024
2025
2026
2027
2028extern unsigned long free_reserved_area(void *start, void *end,
2029 int poison, char *s);
2030
2031#ifdef CONFIG_HIGHMEM
2032
2033
2034
2035
2036extern void free_highmem_page(struct page *page);
2037#endif
2038
2039extern void adjust_managed_page_count(struct page *page, long count);
2040extern void mem_init_print_info(const char *str);
2041
2042extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
2043
2044
2045static inline void __free_reserved_page(struct page *page)
2046{
2047 ClearPageReserved(page);
2048 init_page_count(page);
2049 __free_page(page);
2050}
2051
2052static inline void free_reserved_page(struct page *page)
2053{
2054 __free_reserved_page(page);
2055 adjust_managed_page_count(page, 1);
2056}
2057
2058static inline void mark_page_reserved(struct page *page)
2059{
2060 SetPageReserved(page);
2061 adjust_managed_page_count(page, -1);
2062}
2063
2064
2065
2066
2067
2068
2069
2070static inline unsigned long free_initmem_default(int poison)
2071{
2072 extern char __init_begin[], __init_end[];
2073
2074 return free_reserved_area(&__init_begin, &__init_end,
2075 poison, "unused kernel");
2076}
2077
2078static inline unsigned long get_num_physpages(void)
2079{
2080 int nid;
2081 unsigned long phys_pages = 0;
2082
2083 for_each_online_node(nid)
2084 phys_pages += node_present_pages(nid);
2085
2086 return phys_pages;
2087}
2088
2089#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116extern void free_area_init_nodes(unsigned long *max_zone_pfn);
2117unsigned long node_map_pfn_alignment(void);
2118unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
2119 unsigned long end_pfn);
2120extern unsigned long absent_pages_in_range(unsigned long start_pfn,
2121 unsigned long end_pfn);
2122extern void get_pfn_range_for_nid(unsigned int nid,
2123 unsigned long *start_pfn, unsigned long *end_pfn);
2124extern unsigned long find_min_pfn_with_active_regions(void);
2125extern void free_bootmem_with_active_regions(int nid,
2126 unsigned long max_low_pfn);
2127extern void sparse_memory_present_with_active_regions(int nid);
2128
2129#endif
2130
2131#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
2132 !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
2133static inline int __early_pfn_to_nid(unsigned long pfn,
2134 struct mminit_pfnnid_cache *state)
2135{
2136 return 0;
2137}
2138#else
2139
2140extern int __meminit early_pfn_to_nid(unsigned long pfn);
2141
2142extern int __meminit __early_pfn_to_nid(unsigned long pfn,
2143 struct mminit_pfnnid_cache *state);
2144#endif
2145
2146#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
2147void zero_resv_unavail(void);
2148#else
2149static inline void zero_resv_unavail(void) {}
2150#endif
2151
2152extern void set_dma_reserve(unsigned long new_dma_reserve);
2153extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
2154 enum memmap_context, struct vmem_altmap *);
2155extern void setup_per_zone_wmarks(void);
2156extern int __meminit init_per_zone_wmark_min(void);
2157extern void mem_init(void);
2158extern void __init mmap_init(void);
2159extern void show_mem(unsigned int flags, nodemask_t *nodemask);
2160extern long si_mem_available(void);
2161extern void si_meminfo(struct sysinfo * val);
2162extern void si_meminfo_node(struct sysinfo *val, int nid);
2163#ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
2164extern unsigned long arch_reserved_kernel_pages(void);
2165#endif
2166
2167extern __printf(3, 4)
2168void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
2169
2170extern void setup_per_cpu_pageset(void);
2171
2172extern void zone_pcp_update(struct zone *zone);
2173extern void zone_pcp_reset(struct zone *zone);
2174
2175
2176extern int min_free_kbytes;
2177extern int watermark_scale_factor;
2178
2179
2180extern atomic_long_t mmap_pages_allocated;
2181extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
2182
2183
2184void vma_interval_tree_insert(struct vm_area_struct *node,
2185 struct rb_root_cached *root);
2186void vma_interval_tree_insert_after(struct vm_area_struct *node,
2187 struct vm_area_struct *prev,
2188 struct rb_root_cached *root);
2189void vma_interval_tree_remove(struct vm_area_struct *node,
2190 struct rb_root_cached *root);
2191struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
2192 unsigned long start, unsigned long last);
2193struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
2194 unsigned long start, unsigned long last);
2195
2196#define vma_interval_tree_foreach(vma, root, start, last) \
2197 for (vma = vma_interval_tree_iter_first(root, start, last); \
2198 vma; vma = vma_interval_tree_iter_next(vma, start, last))

void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
				   struct rb_root_cached *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
				   struct rb_root_cached *root);
struct anon_vma_chain *
anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
				  unsigned long start, unsigned long last);
struct anon_vma_chain *anon_vma_interval_tree_iter_next(
	struct anon_vma_chain *node, unsigned long start, unsigned long last);
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
#endif

#define anon_vma_interval_tree_foreach(avc, root, start, last)		 \
	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))

/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
	struct vm_area_struct *expand);
static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
{
	return __vma_adjust(vma, start, end, pgoff, insert, NULL);
}
extern struct vm_area_struct *vma_merge(struct mm_struct *,
	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
	struct mempolicy *, struct vm_userfaultfd_ctx);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
	unsigned long addr, int new_below);
extern int split_vma(struct mm_struct *, struct vm_area_struct *,
	unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
	struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);

static inline int check_data_rlimit(unsigned long rlim,
				    unsigned long new,
				    unsigned long start,
				    unsigned long end_data,
				    unsigned long start_data)
{
	if (rlim < RLIM_INFINITY) {
		if (((new - start) + (end_data - start_data)) > rlim)
			return -ENOSPC;
	}

	return 0;
}
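
/*
 * Illustrative caller, modelled on the brk(2) path: "newbrk" is an assumed
 * name for the proposed new end of the data segment.
 *
 *	if (check_data_rlimit(rlimit(RLIMIT_DATA), newbrk, mm->start_brk,
 *			      mm->end_data, mm->start_data))
 *		return -ENOSPC;
 */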

extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);

extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);
extern struct file *get_task_exe_file(struct task_struct *task);

extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);

extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
				   const struct vm_special_mapping *sm);
extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags,
				   const struct vm_special_mapping *spec);

extern int install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags, struct page **pages);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long mmap_region(struct file *file, unsigned long addr,
	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
	struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
	struct list_head *uf);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
		     struct list_head *uf);

static inline unsigned long
do_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	unsigned long pgoff, unsigned long *populate,
	struct list_head *uf)
{
	return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf);
}

#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
			 int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
{
	/* Ignore errors */
	(void) __mm_populate(addr, len, 1);
}
#else
static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif
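
/*
 * Typical use (sketch, after the MAP_POPULATE handling in mm/util.c):
 * pre-fault a freshly created mapping so first touches do not fault.
 * "populate" receives the length to fault in from do_mmap() above; real
 * callers drop mmap_sem before populating.
 *
 *	ret = do_mmap(file, addr, len, prot, flags, 0, pgoff, &populate, &uf);
 *	if (!IS_ERR_VALUE(ret) && populate)
 *		mm_populate(ret, populate);
 */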

/* These take the mm semaphore themselves */
extern int __must_check vm_brk(unsigned long, unsigned long);
extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
					  unsigned long, unsigned long,
					  unsigned long, unsigned long);

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
};

extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);

/*
 * Search for an unmapped address range.
 *
 * We are looking for a range that:
 * - does not intersect with any VMA;
 * - is contained within the [low_limit, high_limit) interval;
 * - is at least the desired size.
 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
 */
static inline unsigned long
vm_unmapped_area(struct vm_unmapped_area_info *info)
{
	if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
		return unmapped_area_topdown(info);
	else
		return unmapped_area(info);
}
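
/*
 * Illustrative caller, in the style of an arch_get_unmapped_area()
 * implementation: search bottom-up for a "len"-byte gap with no special
 * alignment ("len" and "mm" are assumed names).  On failure the return
 * value is not page-aligned (it is -ENOMEM), hence the offset_in_page()
 * test used by real callers.
 *
 *	struct vm_unmapped_area_info info = {
 *		.flags		= 0,
 *		.length		= len,
 *		.low_limit	= mm->mmap_base,
 *		.high_limit	= TASK_SIZE,
 *		.align_mask	= 0,
 *		.align_offset	= 0,
 *	};
 *	unsigned long addr = vm_unmapped_area(&info);
 *
 *	if (offset_in_page(addr))
 *		return addr;
 */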

/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
				       loff_t lstart, loff_t lend);
extern void truncate_inode_pages_final(struct address_space *);

/* generic vm_area_ops exported for stackable file systems */
extern vm_fault_t filemap_fault(struct vm_fault *vmf);
extern void filemap_map_pages(struct vm_fault *vmf,
		pgoff_t start_pgoff, pgoff_t end_pgoff);
extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
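
/*
 * Sketch of how a filesystem wires these up (cf. generic_file_vm_ops in
 * mm/filemap.c); the struct name here is hypothetical:
 *
 *	static const struct vm_operations_struct myfs_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.map_pages	= filemap_map_pages,
 *		.page_mkwrite	= filemap_page_mkwrite,
 *	};
 */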

/* mm/page-writeback.c */
int __must_check write_one_page(struct page *page);
void task_dirty_inc(struct task_struct *tsk);

/* readahead.c */
#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)

int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			       pgoff_t offset, unsigned long nr_to_read);

void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra,
			       struct file *filp,
			       pgoff_t offset,
			       unsigned long size);

void page_cache_async_readahead(struct address_space *mapping,
				struct file_ra_state *ra,
				struct file *filp,
				struct page *pg,
				pgoff_t offset,
				unsigned long size);
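
/*
 * Call pattern (sketch, after generic_file_buffered_read() in mm/filemap.c):
 * sync readahead on a page-cache miss, async readahead when the marker page
 * set by a previous readahead pass is reached; "last_index" is an assumed
 * name for the end of the read.
 *
 *	if (!page)
 *		page_cache_sync_readahead(mapping, ra, filp, offset,
 *					  last_index - offset);
 *	else if (PageReadahead(page))
 *		page_cache_async_readahead(mapping, ra, filp, page, offset,
 *					   last_index - offset);
 */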

extern unsigned long stack_guard_gap;

/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);

/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
extern int expand_downwards(struct vm_area_struct *vma,
			    unsigned long address);
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
  #define expand_upwards(vma, address) (0)
#endif

/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr);
extern struct vm_area_struct *find_vma_prev(struct mm_struct *mm, unsigned long addr,
					    struct vm_area_struct **pprev);
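
/*
 * Caller pattern (sketch): find_vma() returns the first VMA ending above
 * addr, which may also start above addr, so checking that addr is actually
 * mapped needs both tests; mmap_sem must be held.
 *
 *	vma = find_vma(mm, addr);
 *	if (!vma || addr < vma->vm_start)
 *		return -EFAULT;
 */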

/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none.  Assume start_addr < end_addr. */
static inline struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
		unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct *vma = find_vma(mm, start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		/* the gap underflowed past address 0: clamp to the bottom */
		if (vm_start > vma->vm_start)
			vm_start = 0;
	}
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		/* the gap overflowed: clamp to the top of the address space */
		if (vm_end < vma->vm_end)
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
				unsigned long vm_start, unsigned long vm_end)
{
	struct vm_area_struct *vma = find_vma(mm, vm_start);

	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
		vma = NULL;

	return vma;
}

static inline bool range_in_vma(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	return (vma && vma->vm_start <= start && end <= vma->vm_end);
}

#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
#else
static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(0);
}
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
#endif

#ifdef CONFIG_NUMA_BALANCING
unsigned long change_prot_numa(struct vm_area_struct *vma,
			       unsigned long start, unsigned long end);
#endif

struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
		    unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
		 unsigned long num);
int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
		      unsigned long num);
vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			  unsigned long pfn);
vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
			       unsigned long pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			    pfn_t pfn);
vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
				    unsigned long addr, pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);

static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
					 unsigned long addr, struct page *page)
{
	int err = vm_insert_page(vma, addr, page);

	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}
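
/*
 * Sketch of a driver .fault handler built on vmf_insert_page(); the page
 * lookup helper is hypothetical:
 *
 *	static vm_fault_t mydrv_fault(struct vm_fault *vmf)
 *	{
 *		struct page *page = mydrv_lookup_page(vmf->pgoff);
 *
 *		if (!page)
 *			return VM_FAULT_SIGBUS;
 *		return vmf_insert_page(vmf->vma, vmf->address, page);
 *	}
 */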

static inline vm_fault_t vmf_error(int err)
{
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags);

#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_TOUCH	0x02	/* mark page accessed */
#define FOLL_GET	0x04	/* do get_page on page */
#define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
#define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
#define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
				 * and return without waiting upon it */
#define FOLL_POPULATE	0x40	/* fault in page */
#define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
#define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
#define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
#define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
#define FOLL_TRIED	0x800	/* a retry, previous pass started an IO */
#define FOLL_MLOCK	0x1000	/* lock present pages */
#define FOLL_REMOTE	0x2000	/* we are working on non-current tsk/mm */
#define FOLL_COW	0x4000	/* internal GUP flag */
#define FOLL_ANON	0x8000	/* don't do file mappings */
#define FOLL_LONGTERM	0x10000	/* mapping lifetime is indefinite: see below */

/*
 * NOTE on FOLL_LONGTERM:
 *
 * FOLL_LONGTERM indicates that the page will be held for an indefinite time
 * period, often under userspace control (RDMA memory registrations and V4L2
 * buffers are typical users).  This is in contrast to transient usages such
 * as iov_iter_get_pages().
 *
 * For pages which are part of a filesystem, the mapping lifetime is enforced
 * by the filesystem, and long-term users must coordinate with it.  Until a
 * mechanism for that exists, FOLL_LONGTERM is rejected on FS-DAX mappings,
 * where a long-lived pin would otherwise block operations such as truncate
 * and hole punch indefinitely.
 */
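
/*
 * Example flag combination (sketch): a driver keeping user pages mapped
 * for long-lived DMA pins them the way RDMA drivers do:
 *
 *	ret = get_user_pages(start, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
 *			     pages, NULL);
 */
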
static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
{
	if (vm_fault & VM_FAULT_OOM)
		return -ENOMEM;
	if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
		return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
	if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
		return -EFAULT;
	return 0;
}
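
/*
 * Typical use (sketch, after the fault path in mm/gup.c): translate a
 * handle_mm_fault() result into an errno for a get_user_pages() caller.
 *
 *	vm_fault_t ret = handle_mm_fault(vma, address, fault_flags);
 *
 *	if (ret & VM_FAULT_ERROR) {
 *		int err = vm_fault_to_errno(ret, foll_flags);
 *
 *		if (err)
 *			return err;
 *	}
 */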

typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
			       unsigned long size, pte_fn_t fn, void *data);
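
/*
 * Usage sketch: run a callback over every PTE in [addr, addr + size); the
 * callback below, which counts populated entries, is hypothetical.  A
 * nonzero return from the callback aborts the walk and is passed back.
 *
 *	static int count_present(pte_t *pte, unsigned long addr, void *data)
 *	{
 *		unsigned long *count = data;
 *
 *		if (!pte_none(*pte))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	unsigned long count = 0;
 *	int err = apply_to_page_range(mm, addr, size, count_present, &count);
 */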

#ifdef CONFIG_PAGE_POISONING
extern bool page_poisoning_enabled(void);
extern void kernel_poison_pages(struct page *page, int numpages, int enable);
#else
static inline bool page_poisoning_enabled(void) { return false; }
static inline void kernel_poison_pages(struct page *page, int numpages,
				       int enable) { }
#endif

extern bool _debug_pagealloc_enabled;

static inline bool debug_pagealloc_enabled(void)
{
	return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) && _debug_pagealloc_enabled;
}

#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
extern void __kernel_map_pages(struct page *page, int numpages, int enable);

static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
	__kernel_map_pages(page, numpages, enable);
}
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif
#else
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
#endif
#endif
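
/*
 * Call pattern (sketch, after mm/page_alloc.c): with CONFIG_DEBUG_PAGEALLOC
 * the allocator unmaps pages from the kernel direct map on free and maps
 * them back on allocation, so use-after-free accesses fault immediately.
 *
 *	kernel_map_pages(page, 1 << order, 0);		on free: unmap
 *	kernel_map_pages(page, 1 << order, 1);		on alloc: remap
 */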

#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
extern int in_gate_area_no_mm(unsigned long addr);
extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
#else
static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}
static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}
#endif

extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);

#ifdef CONFIG_SYSCTL
extern int sysctl_drop_caches;
int drop_caches_sysctl_handler(struct ctl_table *, int,
			       void __user *, size_t *, loff_t *);
#endif

void drop_slab(void);
void drop_slab_node(int nid);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char *arch_vma_name(struct vm_area_struct *vma);
void print_vma_addr(char *prefix, unsigned long rip);

void *sparse_buffer_alloc(unsigned long size);
struct page *__populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
struct vmem_altmap;
void *vmemmap_alloc_block_buf(unsigned long size, int node);
void *altmap_alloc_block_buf(unsigned long size, struct vmem_altmap *altmap);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
			       int node);
int vmemmap_populate(unsigned long start, unsigned long end, int node,
		     struct vmem_altmap *altmap);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap);
#endif
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
				  unsigned long nr_pages);

enum mf_flags {
	MF_COUNT_INCREASED = 1 << 0,
	MF_ACTION_REQUIRED = 1 << 1,
	MF_MUST_KILL = 1 << 2,
	MF_SOFT_OFFLINE = 1 << 3,
};
extern int memory_failure(unsigned long pfn, int flags);
extern void memory_failure_queue(unsigned long pfn, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int get_hwpoison_page(struct page *page);
#define put_hwpoison_page(page)	put_page(page)
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(struct page *page, int flags);
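
/*
 * Reporting sketch: a machine-check handler queues poisoned pfns from
 * interrupt context and handles them later from process context; the flag
 * values come from enum mf_flags above.
 *
 *	memory_failure_queue(pfn, MF_ACTION_REQUIRED);
 *	...
 *	memory_failure(pfn, 0);
 */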

/*
 * Error handlers for various types of pages.
 */
enum mf_result {
	MF_IGNORED,	/* Error: cannot be handled */
	MF_FAILED,	/* Error: handling failed */
	MF_DELAYED,	/* Will be handled later */
	MF_RECOVERED,	/* Successfully recovered */
};

enum mf_action_page_type {
	MF_MSG_KERNEL,
	MF_MSG_KERNEL_HIGH_ORDER,
	MF_MSG_SLAB,
	MF_MSG_DIFFERENT_COMPOUND,
	MF_MSG_POISONED_HUGE,
	MF_MSG_HUGE,
	MF_MSG_FREE_HUGE,
	MF_MSG_NON_PMD_HUGE,
	MF_MSG_UNMAP_FAILED,
	MF_MSG_DIRTY_SWAPCACHE,
	MF_MSG_CLEAN_SWAPCACHE,
	MF_MSG_DIRTY_MLOCKED_LRU,
	MF_MSG_CLEAN_MLOCKED_LRU,
	MF_MSG_DIRTY_UNEVICTABLE_LRU,
	MF_MSG_CLEAN_UNEVICTABLE_LRU,
	MF_MSG_DIRTY_LRU,
	MF_MSG_CLEAN_LRU,
	MF_MSG_TRUNCATED_LRU,
	MF_MSG_BUDDY,
	MF_MSG_BUDDY_2ND,
	MF_MSG_DAX,
	MF_MSG_UNKNOWN,
};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
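/*
 * Note (summarizing the mm/memory.c implementation): addr_hint is the user
 * address that faulted; the subpage containing it is cleared or copied last
 * so its cache lines are still hot when the caller resumes.
 */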
extern void clear_huge_page(struct page *page,
			    unsigned long addr_hint,
			    unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src,
				unsigned long addr, struct vm_area_struct *vma,
				unsigned int pages_per_huge_page);
extern long copy_huge_page_from_user(struct page *dst_page,
				     const void __user *usr_src,
				     unsigned int pages_per_huge_page,
				     bool allow_pagefault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

extern struct page_ext_operations debug_guardpage_ops;

#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
extern bool _debug_guardpage_enabled;

static inline unsigned int debug_guardpage_minorder(void)
{
	return _debug_guardpage_minorder;
}

static inline bool debug_guardpage_enabled(void)
{
	return _debug_guardpage_enabled;
}

static inline bool page_is_guard(struct page *page)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return false;

	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return false;

	return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool debug_guardpage_enabled(void) { return false; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif /* CONFIG_DEBUG_PAGEALLOC */

#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else
static inline void setup_nr_node_ids(void) {}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */