/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
#include <linux/page_ref.h>
#include <linux/memremap.h>
#include <linux/overflow.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;
struct bdi_writeback;

void init_mm_internals(void);

#ifndef CONFIG_NEED_MULTIPLE_NODES
extern unsigned long max_mapnr;

static inline void set_max_mapnr(unsigned long limit)
{
	max_mapnr = limit;
}
#else
static inline void set_max_mapnr(unsigned long limit) { }
#endif

extern unsigned long totalram_pages;
extern void * high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
extern const int mmap_rnd_bits_min;
extern const int mmap_rnd_bits_max;
extern int mmap_rnd_bits __read_mostly;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
extern const int mmap_rnd_compat_bits_min;
extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __read_mostly;
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#ifndef __pa_symbol
#define __pa_symbol(x)	__pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

#ifndef page_to_virt
#define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
#endif

#ifndef lm_alias
#define lm_alias(x)	__va(__pa_symbol(x))
#endif

/*
 * To prevent common memory management code establishing
 * a zero page mapping on a read fault.
 * This macro should be defined within <asm/pgtable.h>.
 * s390 does this to prevent multiplexing of hardware bits
 * related to the physical page in case of virtualization.
 */
#ifndef mm_forbids_zeropage
#define mm_forbids_zeropage(X)	(0)
#endif

/*
 * On some architectures it is expensive to call memset() for small sizes.
 * Those architectures should provide their own implementation of "struct
 * page" zeroing by defining this macro in <asm/pgtable.h>.
 */
#ifndef mm_zero_struct_page
#define mm_zero_struct_page(pp)	((void)memset((pp), 0, sizeof(struct page)))
#endif

/*
 * Default maximum number of active map areas, this limits the number of vmas
 * per mm struct. Users can overwrite this number by sysctl but there is a
 * problem.
 *
 * When a program's coredump is generated as ELF format, a section is created
 * per a vma. In ELF, the number of sections is represented in unsigned short.
 * This means the number of sections should be smaller than 65535 at coredump.
 * Because the kernel adds some informative sections to a image of program at
 * generating coredump, we need some margin. The number of extra sections is
 * 1-3 now and depends on arch. We use "5" as safe margin, here.
 *
 * ELF extended numbering allows more than 65535 sections, so 16-bit bound is
 * not a hard limit any more. Although some userspace tools can be surprised
 * by that.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

extern int sysctl_max_map_count;

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;

extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
				    size_t *, loff_t *);
extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
				     size_t *, loff_t *);

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
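
/*
 * Example (illustrative sketch, not part of the kernel API): rounding a
 * byte count up to a page boundary before mapping it.  With 4K pages,
 * PAGE_ALIGN(5000) yields 8192, and PAGE_ALIGNED(8192) is true.  "len"
 * and "mapped_size" are hypothetical locals:
 *
 *	unsigned long len = 5000;
 *	unsigned long mapped_size = PAGE_ALIGN(len);
 *
 *	BUG_ON(!PAGE_ALIGNED(mapped_size));
 */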

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

struct vm_area_struct *vm_area_alloc(struct mm_struct *);
struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
void vm_area_free(struct vm_area_struct *);

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 * When changing, update also include/trace/events/mmflags.h
 */
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
#define VM_UFFD_WP	0x00001000	/* wrprotect pages tracking */

#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_LOCKONFAULT	0x00080000	/* Lock the pages covered when they are faulted in */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_SYNC		0x00800000	/* Synchronous page faults */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_WIPEONFORK	0x02000000	/* Wipe VMA contents in child */
#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
#else
# define VM_SOFTDIRTY	0
#endif

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
#define VM_HIGH_ARCH_BIT_0	32	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_1	33	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_2	34	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3	35	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_4	36	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
#define VM_HIGH_ARCH_4	BIT(VM_HIGH_ARCH_BIT_4)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */

#ifdef CONFIG_ARCH_HAS_PKEYS
# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0	VM_HIGH_ARCH_0	/* A protection key is a 4-bit value */
# define VM_PKEY_BIT1	VM_HIGH_ARCH_1	/* on x86 and 5-bit value on ppc64 */
# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
#ifdef CONFIG_PPC
# define VM_PKEY_BIT4	VM_HIGH_ARCH_4
#else
# define VM_PKEY_BIT4	0
#endif
#endif /* CONFIG_ARCH_HAS_PKEYS */

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_SPARC64)
# define VM_SPARC_ADI	VM_ARCH_1	/* Uses ADI tag for access control */
# define VM_ARCH_CLEAR	VM_SPARC_ADI
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif

#if defined(CONFIG_X86_INTEL_MPX)
/* MPX specific bounds table or bounds directory */
# define VM_MPX		VM_HIGH_ARCH_4
#else
# define VM_MPX		VM_NONE
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK	VM_GROWSUP
#else
#define VM_STACK	VM_GROWSDOWN
#endif

#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)

/*
 * Special vmas that are non-mergable, non-mlock()able.
 * Note: mm/gup.c:__get_user_pages() relies on this definition.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

/* This mask defines which mm->def_flags a process can inherit its parent */
#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE

/* This mask is used to clear all the VMA flags used by mlock */
#define VM_LOCKED_CLEAR_MASK	(~(VM_LOCKED | VM_LOCKONFAULT))

/* Arch-specific flags to clear when updating VM flags on protection change */
#ifndef VM_ARCH_CLEAR
# define VM_ARCH_CLEAR	VM_NONE
#endif
#define VM_FLAGS_CLEAR	(ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];

#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
#define FAULT_FLAG_MKWRITE	0x02	/* Fault was mkwrite of existing pte */
#define FAULT_FLAG_ALLOW_RETRY	0x04	/* Retry fault if blocking */
#define FAULT_FLAG_RETRY_NOWAIT	0x08	/* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE	0x10	/* The fault task is in SIGKILL killable region */
#define FAULT_FLAG_TRIED	0x20	/* Second try */
#define FAULT_FLAG_USER		0x40	/* The fault originated in userspace */
#define FAULT_FLAG_REMOTE	0x80	/* faulting for non current tsk/mm */
#define FAULT_FLAG_INSTRUCTION	0x100	/* The fault was during an instruction fetch */

#define FAULT_FLAG_TRACE \
	{ FAULT_FLAG_WRITE,		"WRITE" }, \
	{ FAULT_FLAG_MKWRITE,		"MKWRITE" }, \
	{ FAULT_FLAG_ALLOW_RETRY,	"ALLOW_RETRY" }, \
	{ FAULT_FLAG_RETRY_NOWAIT,	"RETRY_NOWAIT" }, \
	{ FAULT_FLAG_KILLABLE,		"KILLABLE" }, \
	{ FAULT_FLAG_TRIED,		"TRIED" }, \
	{ FAULT_FLAG_USER,		"USER" }, \
	{ FAULT_FLAG_REMOTE,		"REMOTE" }, \
	{ FAULT_FLAG_INSTRUCTION,	"INSTRUCTION" }

/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * MM layer fills up gfp_mask for page allocations but fault handler might
 * alter it if its implementation requires a different allocation context.
 *
 * pgoff should be used in favour of virtual_address, if possible.
 */
struct vm_fault {
	struct vm_area_struct *vma;	/* Target VMA */
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	gfp_t gfp_mask;			/* gfp mask to be used for allocations */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	unsigned long address;		/* Faulting virtual address */
	pmd_t *pmd;			/* Pointer to pmd entry matching
					 * the 'address' */
	pud_t *pud;			/* Pointer to pud entry matching
					 * the 'address'
					 */
	pte_t orig_pte;			/* Value of PTE at the time of fault */

	struct page *cow_page;		/* Page handler may use for COW fault */
	struct mem_cgroup *memcg;	/* Cgroup cow_page belongs to */
	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
	/* These three entries are valid only while holding ptl lock */
	pte_t *pte;			/* Pointer to pte entry matching
					 * the 'address'. NULL if the page
					 * table hasn't been allocated.
					 */
	spinlock_t *ptl;		/* Page table lock.
					 * Protects pte page table if 'pte'
					 * is not NULL, otherwise pmd.
					 */
	pgtable_t prealloc_pte;		/* Pre-allocated pte page table.
					 * vm_ops->map_pages() calls
					 * alloc_set_pte() from atomic context.
					 * do_fault_around() pre-allocates
					 * page table to avoid allocation from
					 * atomic context.
					 */
};

/* page entry size for vm->huge_fault() */
enum page_entry_size {
	PE_SIZE_PTE = 0,
	PE_SIZE_PMD,
	PE_SIZE_PUD,
};

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*split)(struct vm_area_struct * area, unsigned long addr);
	int (*mremap)(struct vm_area_struct * area);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf,
			enum page_entry_size pe_size);
	void (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct * area);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs that can switch between memory and hardware
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
#endif
	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr.  This is useful if the default behavior
	 * (using pte_page()) would not find the correct page.
	 */
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};
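
/*
 * Example (hedged sketch, not a kernel symbol): a minimal driver-style
 * vm_operations_struct whose ->fault handler maps one preallocated page.
 * "my_dev_page" and "my_vm_fault" are hypothetical names used only for
 * illustration; the handler takes a reference, hands the page back in
 * vmf->page and returns 0 so the core fault code installs the pte:
 *
 *	static vm_fault_t my_vm_fault(struct vm_fault *vmf)
 *	{
 *		get_page(my_dev_page);
 *		vmf->page = my_dev_page;
 *		return 0;
 *	}
 *
 *	static const struct vm_operations_struct my_vm_ops = {
 *		.fault = my_vm_fault,
 *	};
 */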

static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
	static const struct vm_operations_struct dummy_vm_ops = {};

	memset(vma, 0, sizeof(*vma));
	vma->vm_mm = mm;
	vma->vm_ops = &dummy_vm_ops;
	INIT_LIST_HEAD(&vma->anon_vma_chain);
}

static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
	vma->vm_ops = NULL;
}
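
/*
 * Example (illustrative): callers that build a temporary VMA, e.g. on
 * the stack for a TLB flush, initialize it with vma_init() so vm_ops is
 * never left NULL by accident; vma_set_anonymous() is then the explicit
 * way to mark it anonymous.  "tmp" is a hypothetical local:
 *
 *	struct vm_area_struct tmp;
 *
 *	vma_init(&tmp, mm);
 *	vma_set_anonymous(&tmp);
 */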

/* flush_tlb_range() takes a vma, not a mm, and can care about flags */
#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }

struct mmu_gather;
struct inode;

#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline int pmd_devmap(pmd_t pmd)
{
	return 0;
}
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

/*
 * FIXME: take this include out, include page-flags.h in
 * files which need it (119 of them)
 */
#include <linux/page-flags.h>
#include <linux/huge_mm.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no
 * users).
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
	return page_ref_dec_and_test(page);
}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 * This can be called when MMU is off so it must not access
 * any of the virtual mappings.
 */
static inline int get_page_unless_zero(struct page *page)
{
	return page_ref_add_unless(page, 1, 0);
}

extern int page_is_ram(unsigned long pfn);

enum {
	REGION_INTERSECTS,
	REGION_DISJOINT,
	REGION_MIXED,
};

int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
		      unsigned long desc);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range.
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
static inline bool is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return false;
#endif
}
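
/*
 * Example (sketch): code that must free a buffer which may have come
 * from either kmalloc() or vmalloc() can dispatch on is_vmalloc_addr();
 * this is essentially what kvfree() below does internally:
 *
 *	if (is_vmalloc_addr(buf))
 *		vfree(buf);
 *	else
 *		kfree(buf);
 */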
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
static inline void *kvmalloc(size_t size, gfp_t flags)
{
	return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
	return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
static inline void *kvzalloc(size_t size, gfp_t flags)
{
	return kvmalloc(size, flags | __GFP_ZERO);
}

static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return kvmalloc(bytes, flags);
}

static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
{
	return kvmalloc_array(n, size, flags | __GFP_ZERO);
}

extern void kvfree(const void *addr);
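
/*
 * Example (illustrative sketch): allocating a possibly large table that
 * may fall back to vmalloc under memory fragmentation, then releasing
 * it with kvfree(), which handles both backing allocators.  Note the
 * overflow-checked multiplication in kvmalloc_array().  "nr" and
 * "table" are hypothetical:
 *
 *	struct entry *table;
 *
 *	table = kvmalloc_array(nr, sizeof(*table), GFP_KERNEL | __GFP_ZERO);
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	kvfree(table);
 */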

static inline atomic_t *compound_mapcount_ptr(struct page *page)
{
	return &page[1].compound_mapcount;
}

static inline int compound_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(!PageCompound(page), page);
	page = compound_head(page);
	return atomic_read(compound_mapcount_ptr(page)) + 1;
}

/*
 * The atomic page->_mapcount, starts from -1: so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */
static inline void page_mapcount_reset(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

int __page_mapcount(struct page *page);

static inline int page_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(PageSlab(page), page);

	if (unlikely(PageCompound(page)))
		return __page_mapcount(page);
	return atomic_read(&page->_mapcount) + 1;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int total_mapcount(struct page *page);
int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
#else
static inline int total_mapcount(struct page *page)
{
	return page_mapcount(page);
}
static inline int page_trans_huge_mapcount(struct page *page,
					   int *total_mapcount)
{
	int mapcount = page_mapcount(page);
	if (total_mapcount)
		*total_mapcount = mapcount;
	return mapcount;
}
#endif

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);

	return compound_head(page);
}

void __put_page(struct page *page);

void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);

/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a compound page.
 */
typedef void compound_page_dtor(struct page *);

/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
enum compound_dtor_id {
	NULL_COMPOUND_DTOR,
	COMPOUND_PAGE_DTOR,
#ifdef CONFIG_HUGETLB_PAGE
	HUGETLB_PAGE_DTOR,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	TRANSHUGE_PAGE_DTOR,
#endif
	NR_COMPOUND_DTORS,
};
extern compound_page_dtor * const compound_page_dtors[];

static inline void set_compound_page_dtor(struct page *page,
		enum compound_dtor_id compound_dtor)
{
	VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
	page[1].compound_dtor = compound_dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
	return compound_page_dtors[page[1].compound_dtor];
}

static inline unsigned int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return page[1].compound_order;
}

static inline void set_compound_order(struct page *page, unsigned int order)
{
	page[1].compound_order = order;
}
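
/*
 * Example (sketch): computing the number of base pages covered by a
 * possibly compound page from its order; for a non-head page
 * compound_order() returns 0, i.e. a single page:
 *
 *	unsigned long nr_pages = 1UL << compound_order(page);
 */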

void free_compound_page(struct page *page);

#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case, do always want
 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}

vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
		struct page *page);
vm_fault_t finish_fault(struct vm_fault *vmf);
vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#endif

/*
 * Multiple processes may "see" the same page.  E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count.
 *   page_count() == 0 means the page is free. page->lru is then used for
 *   freelist management in the buddy allocator.
 *   page_count() > 0  means the page has been allocated.
 *
 * Pages allocated by the slab allocator are managed by mm/slab.c; the
 * fields in 'struct page' are its responsibility unless a particular
 * usage is carefully commented.
 *
 * The other pages (we may call them "pagecache pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * Whether a pagecache page is inode backed (page->mapping points at the
 * inode, page->index is the file offset in units of PAGE_SIZE) or
 * anonymous (possibly entered into the swapcache), the pagecache itself
 * holds one reference to the page and each user mapping holds another.
 */

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */

/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_zone_device_page(const struct page *page)
{
	return page_zonenum(page) == ZONE_DEVICE;
}
#else
static inline bool is_zone_device_page(const struct page *page)
{
	return false;
}
#endif

#ifdef CONFIG_DEV_PAGEMAP_OPS
void dev_pagemap_get_ops(void);
void dev_pagemap_put_ops(void);
void __put_devmap_managed_page(struct page *page);
DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
static inline bool put_devmap_managed_page(struct page *page)
{
	if (!static_branch_unlikely(&devmap_managed_key))
		return false;
	if (!is_zone_device_page(page))
		return false;
	switch (page->pgmap->type) {
	case MEMORY_DEVICE_PRIVATE:
	case MEMORY_DEVICE_PUBLIC:
	case MEMORY_DEVICE_FS_DAX:
		__put_devmap_managed_page(page);
		return true;
	default:
		break;
	}
	return false;
}

static inline bool is_device_private_page(const struct page *page)
{
	return is_zone_device_page(page) &&
		page->pgmap->type == MEMORY_DEVICE_PRIVATE;
}

static inline bool is_device_public_page(const struct page *page)
{
	return is_zone_device_page(page) &&
		page->pgmap->type == MEMORY_DEVICE_PUBLIC;
}

#else /* CONFIG_DEV_PAGEMAP_OPS */
static inline void dev_pagemap_get_ops(void)
{
}

static inline void dev_pagemap_put_ops(void)
{
}

static inline bool put_devmap_managed_page(struct page *page)
{
	return false;
}

static inline bool is_device_private_page(const struct page *page)
{
	return false;
}

static inline bool is_device_public_page(const struct page *page)
{
	return false;
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */

static inline void get_page(struct page *page)
{
	page = compound_head(page);
	/*
	 * Getting a normal page or the head of a compound page
	 * requires to already have an elevated page->_refcount.
	 */
	VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
	page_ref_inc(page);
}

static inline void put_page(struct page *page)
{
	page = compound_head(page);

	/*
	 * For devmap managed pages we need to catch refcount transition from
	 * 2 to 1, when refcount reach one it means the page is free and we
	 * need to inform the device driver through callback. See
	 * include/linux/memremap.h and HMM for details.
	 */
	if (put_devmap_managed_page(page))
		return;

	if (put_page_testzero(page))
		__put_page(page);
}
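
/*
 * Example (illustrative): the canonical pin/use/release pattern.  Any
 * code that takes a reference with get_page() must drop it with
 * put_page(); the final put frees the page via __put_page():
 *
 *	get_page(page);
 *	...use the page; it cannot be freed under us...
 *	put_page(page);
 */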

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif

/*
 * The identification function is mainly used by the buddy allocator for
 * detecting if two pages could be buddies. We are not really identifying
 * the zone since we could be using the section number id if we do not
 * have node id available in page flags.
 * We only guarantee that it will return the same value for two combinable
 * pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	struct page *p = (struct page *)page;

	return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

static inline int cpupid_to_pid(int cpupid)
{
	return cpupid & LAST__PID_MASK;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}

static inline int cpupid_to_nid(int cpupid)
{
	return cpu_to_node(cpupid_to_cpu(cpupid));
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}

static inline bool cpupid_cpu_unset(int cpupid)
{
	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
}

static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
{
	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
}

#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}

static inline int page_cpupid_last(struct page *page)
{
	return page->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int page_cpupid_last(struct page *page)
{
	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}

extern int page_cpupid_xchg_last(struct page *page, int cpupid);

static inline void page_cpupid_reset_last(struct page *page)
{
	page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
#else /* !CONFIG_NUMA_BALANCING */
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return page_to_nid(page);
}

static inline int page_cpupid_last(struct page *page)
{
	return page_to_nid(page);
}

static inline int cpupid_to_nid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_pid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return -1;
}

static inline int cpu_pid_to_cpupid(int nid, int pid)
{
	return -1;
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return 1;
}

static inline void page_cpupid_reset_last(struct page *page)
{
}

static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

static inline pg_data_t *page_pgdat(const struct page *page)
{
	return NODE_DATA(page_to_nid(page));
}
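
/*
 * Example (sketch): going from a page to its containing zone, node data
 * and node id, e.g. before updating a per-zone or per-node counter:
 *
 *	struct zone *zone = page_zone(page);
 *	pg_data_t *pgdat = page_pgdat(page);
 *	int nid = page_to_nid(page);
 */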

#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#ifdef SECTION_IN_PAGE_FLAGS
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}

#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return page->mem_cgroup;
}
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return READ_ONCE(page->mem_cgroup);
}
#else
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return NULL;
}
#endif

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(const struct page *page)
{
	return page_to_virt(page);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
static inline void *page_address(const struct page *page)
{
	return page->virtual;
}
static inline void set_page_address(struct page *page, void *address)
{
	page->virtual = address;
}
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

extern void *page_rmapping(struct page *page);
extern struct anon_vma *page_anon_vma(struct page *page);
extern struct address_space *page_mapping(struct page *page);

extern struct address_space *__page_file_mapping(struct page *);

static inline
struct address_space *page_file_mapping(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_mapping(page);

	return page->mapping;
}

extern pgoff_t __page_file_index(struct page *page);

/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use swp_offset(->private)
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_index(page);
	return page->index;
}

bool page_mapped(struct page *page);
struct address_space *page_mapping(struct page *page);
struct address_space *page_mapping_file(struct page *page);

/*
 * Return true only if the page has been allocated with
 * ALLOC_NO_WATERMARKS and the low watermark was not
 * met implying that the system is under some pressure.
 */
static inline bool page_is_pfmemalloc(struct page *page)
{
	/*
	 * Page index cannot be this large so this must be
	 * a pfmemalloc page.
	 */
	return page->index == -1UL;
}

/*
 * Only to be called by the page allocator on a freshly allocated
 * page.
 */
static inline void set_page_pfmemalloc(struct page *page)
{
	page->index = -1UL;
}

static inline void clear_page_pfmemalloc(struct page *page)
{
	page->index = 0;
}

/*
 * Different kinds of faults, as returned by handle_mm_fault().
 * Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 */

#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
#define VM_FAULT_HWPOISON_LARGE 0x0020	/* Hit poisoned large page. Index encoded in upper bits */
#define VM_FAULT_SIGSEGV 0x0040

#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
#define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */
#define VM_FAULT_FALLBACK 0x0800	/* huge page fault failed, fall back to small */
#define VM_FAULT_DONE_COW	0x1000	/* ->fault has fully handled COW */
#define VM_FAULT_NEEDDSYNC	0x2000	/* ->fault did not modify page tables
					 * and needs fsync() to complete (for
					 * synchronous page faults in DAX) */

#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
			 VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
			 VM_FAULT_FALLBACK)

#define VM_FAULT_RESULT_TRACE \
	{ VM_FAULT_OOM,			"OOM" }, \
	{ VM_FAULT_SIGBUS,		"SIGBUS" }, \
	{ VM_FAULT_MAJOR,		"MAJOR" }, \
	{ VM_FAULT_WRITE,		"WRITE" }, \
	{ VM_FAULT_HWPOISON,		"HWPOISON" }, \
	{ VM_FAULT_HWPOISON_LARGE,	"HWPOISON_LARGE" }, \
	{ VM_FAULT_SIGSEGV,		"SIGSEGV" }, \
	{ VM_FAULT_NOPAGE,		"NOPAGE" }, \
	{ VM_FAULT_LOCKED,		"LOCKED" }, \
	{ VM_FAULT_RETRY,		"RETRY" }, \
	{ VM_FAULT_FALLBACK,		"FALLBACK" }, \
	{ VM_FAULT_DONE_COW,		"DONE_COW" }, \
	{ VM_FAULT_NEEDDSYNC,		"NEEDDSYNC" }

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)

/*
 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
 */
extern void pagefault_out_of_memory(void);

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

/*
 * Flags passed to show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);

extern bool can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct address_space *check_mapping;	/* Check page->mapping if set */
	pgoff_t	first_index;			/* Lowest page->index to unmap */
	pgoff_t last_index;			/* Highest page->index to unmap */
};

struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			     pte_t pte, bool with_public_device);
#define vm_normal_page(vma, addr, pte) _vm_normal_page(vma, addr, pte, false)

struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd);

void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		  unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
		    unsigned long size);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long start, unsigned long end);

/**
 * mm_walk - callbacks for walk_page_range
 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
 *	       this handler should only handle pud_trans_huge() puds.
 *	       the pmd_entry or pte_entry callbacks will be used for
 *	       regular PUDs.
 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
 *	       this handler is required to be able to handle
 *	       pmd_trans_huge() pmds.  They may simply choose to
 *	       split_huge_page() instead of handling it explicitly.
 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
 * @pte_hole: if set, called for each hole at all levels
 * @hugetlb_entry: if set, called for each hugetlb entry
 * @test_walk: caller specific callback function to determine whether
 *             we walk over the current vma or not. Returning 0 means
 *             "do page table walk over the current vma", a negative
 *             value means "abort current page table walk right now"
 *             and 1 means "skip the current vma".
 * @mm:        mm_struct representing the target process of page table walk
 * @vma:       vma currently walked (NULL if walking outside vmas)
 * @private:   private data for callbacks' usage
 *
 * (see the comment on walk_page_range() for more details)
 */
struct mm_walk {
	int (*pud_entry)(pud_t *pud, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_entry)(pte_t *pte, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_hole)(unsigned long addr, unsigned long next,
			struct mm_walk *walk);
	int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
			     unsigned long addr, unsigned long next,
			     struct mm_walk *walk);
	int (*test_walk)(unsigned long addr, unsigned long next,
			 struct mm_walk *walk);
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	void *private;
};

int walk_page_range(unsigned long addr, unsigned long end,
		struct mm_walk *walk);
int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
		struct vm_area_struct *vma);
int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
		unsigned long *start, unsigned long *end,
		pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
		unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
		void *buf, int len, int write);

extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);

#ifdef CONFIG_MMU
extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long address, unsigned int fault_flags,
			    bool *unlocked);
void unmap_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t nr, bool even_cows);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
#else
static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
		unsigned long address, unsigned int flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct task_struct *tsk,
		struct mm_struct *mm, unsigned long address,
		unsigned int fault_flags, bool *unlocked)
{
	/* should never happen if there's no MMU */
	BUG();
	return -EFAULT;
}
static inline void unmap_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t nr, bool even_cows) { }
static inline void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows) { }
#endif

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
		void *buf, int len, unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, unsigned int gup_flags);
extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, unsigned int gup_flags);

long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long start, unsigned long nr_pages,
			    unsigned int gup_flags, struct page **pages,
			    struct vm_area_struct **vmas, int *locked);
long get_user_pages(unsigned long start, unsigned long nr_pages,
			    unsigned int gup_flags, struct page **pages,
			    struct vm_area_struct **vmas);
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages, int *locked);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
		    struct page **pages, unsigned int gup_flags);
#ifdef CONFIG_FS_DAX
long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
			    unsigned int gup_flags, struct page **pages,
			    struct vm_area_struct **vmas);
#else
static inline long get_user_pages_longterm(unsigned long start,
		unsigned long nr_pages, unsigned int gup_flags,
		struct page **pages, struct vm_area_struct **vmas)
{
	return get_user_pages(start, nr_pages, gup_flags, pages, vmas);
}
#endif /* CONFIG_FS_DAX */

int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages);

/* Container for pinned pfns / pages */
struct frame_vector {
	unsigned int nr_allocated;	/* Number of frames we have space for */
	unsigned int nr_frames;	/* Number of frames stored in ptrs array */
	bool got_ref;		/* Did we pin pages by getting page ref? */
	bool is_pfns;		/* Does array contain pages or pfns? */
	void *ptrs[0];		/* Array of pinned pfns / pages. Use
				 * frame_vector_pages() or frame_vector_pfns()
				 * for access */
};

struct frame_vector *frame_vector_create(unsigned int nr_frames);
void frame_vector_destroy(struct frame_vector *vec);
int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
		     unsigned int gup_flags, struct frame_vector *vec);
void put_vaddr_frames(struct frame_vector *vec);
int frame_vector_to_pages(struct frame_vector *vec);
void frame_vector_to_pfns(struct frame_vector *vec);

static inline unsigned int frame_vector_count(struct frame_vector *vec)
{
	return vec->nr_frames;
}

static inline struct page **frame_vector_pages(struct frame_vector *vec)
{
	if (vec->is_pfns) {
		int err = frame_vector_to_pages(vec);

		if (err)
			return ERR_PTR(err);
	}
	return (struct page **)(vec->ptrs);
}

static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
{
	if (!vec->is_pfns)
		frame_vector_to_pfns(vec);
	return (unsigned long *)(vec->ptrs);
}
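
/*
 * Example (hedged sketch, error handling abbreviated): pinning user
 * memory with a frame vector and converting it to struct page pointers;
 * "start" and "nr" are hypothetical caller-provided values:
 *
 *	struct frame_vector *vec = frame_vector_create(nr);
 *	int ret = get_vaddr_frames(start, nr, FOLL_WRITE, vec);
 *	struct page **pages = frame_vector_pages(vec);
 *	...
 *	put_vaddr_frames(vec);
 *	frame_vector_destroy(vec);
 */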

struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
			struct page **pages);
int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);

extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned int offset,
			      unsigned int length);

void __set_page_dirty(struct page *, struct address_space *, int warn);
int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_cleaned(struct page *page, struct address_space *mapping,
			  struct bdi_writeback *wb);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
void __cancel_dirty_page(struct page *page);
static inline void cancel_dirty_page(struct page *page)
{
	/* Avoid atomic ops, locking, etc. when not actually needed. */
	if (PageDirty(page))
		__cancel_dirty_page(page);
}
int clear_page_dirty_for_io(struct page *page);

int get_cmdline(struct task_struct *task, char *buffer, int buflen);

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

#ifdef CONFIG_SHMEM
/*
 * The vma_is_shmem is not inline because it is used only by slow
 * paths in userfault.
 */
bool vma_is_shmem(struct vm_area_struct *vma);
#else
static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
#endif

int vma_is_stack_for_current(struct vm_area_struct *vma);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks);
extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end, pgprot_t newprot,
			      int dirty_accountable, int prot_numa);
extern int mprotect_fixup(struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

/*
 * doesn't attempt to fault and will return short.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages);

/*
 * per-process(per-mm_struct) statistics.
 */
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = atomic_long_read(&mm->rss_stat.count[member]);

#ifdef SPLIT_RSS_COUNTING
	/*
	 * counter is updated in asynchronous manner and may go to minus.
	 * But it's never be expected number for users.
	 */
	if (val < 0)
		val = 0;
#endif
	return (unsigned long)val;
}

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	atomic_long_add(value, &mm->rss_stat.count[member]);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_inc(&mm->rss_stat.count[member]);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_dec(&mm->rss_stat.count[member]);
}

/* Optimized variant when page is already known not to be PageAnon */
static inline int mm_counter_file(struct page *page)
{
	if (PageSwapBacked(page))
		return MM_SHMEMPAGES;
	return MM_FILEPAGES;
}

static inline int mm_counter(struct page *page)
{
	if (PageAnon(page))
		return MM_ANONPAGES;
	return mm_counter_file(page);
}

static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES) +
		get_mm_counter(mm, MM_SHMEMPAGES);
}
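
/*
 * Example (hedged sketch): code mapping a new anonymous page would bump
 * the corresponding rss counter roughly like this; get_mm_rss() then
 * reports the combined file + anon + shmem footprint in pages:
 *
 *	inc_mm_counter(mm, MM_ANONPAGES);
 *	...
 *	pr_debug("rss now %lu pages\n", get_mm_rss(mm));
 */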

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if ((mm)->hiwater_rss < _rss)
		(mm)->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
{
	mm->hiwater_rss = get_mm_rss(mm);
}

static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

	if (*maxrss < hiwater_rss)
		*maxrss = hiwater_rss;
}

#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct mm_struct *mm)
{
}
#endif

#ifndef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t pte)
{
	return 0;
}
#endif

int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			       spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
				    spinlock_t **ptl)
{
	pte_t *ptep;
	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
	return ptep;
}

#ifdef __PAGETABLE_P4D_FOLDED
static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
						unsigned long address)
{
	return 0;
}
#else
int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
						unsigned long address)
{
	return 0;
}
static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
static inline void mm_dec_nr_puds(struct mm_struct *mm) {}

#else
int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);

static inline void mm_inc_nr_puds(struct mm_struct *mm)
{
	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_puds(struct mm_struct *mm)
{
	atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
#endif

#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}

static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}

#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);

static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
	atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
	atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
#endif

#ifdef CONFIG_MMU
static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->pgtables_bytes, 0);
}

static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
	return atomic_long_read(&mm->pgtables_bytes);
}

static inline void mm_inc_nr_ptes(struct mm_struct *mm)
{
	atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_ptes(struct mm_struct *mm)
{
	atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}
#else

static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
	return 0;
}

static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
#endif

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);

/*
 * The following ifdef needed to get the 4level-fixup.h header to work.
 * Remove it when 4level-fixup.h has been removed.
 */
#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)

#ifndef __ARCH_HAS_5LEVEL_HACK
static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
		unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
		NULL : p4d_offset(pgd, address);
}

static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
		unsigned long address)
{
	return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
		NULL : pud_offset(p4d, address);
}
#endif /* !__ARCH_HAS_5LEVEL_HACK */

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
		NULL: pmd_offset(pud, address);
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */

#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
extern bool ptlock_alloc(struct page *page);
extern void ptlock_free(struct page *page);

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return page->ptl;
}
#else /* ALLOC_SPLIT_PTLOCKS */
static inline void ptlock_cache_init(void)
{
}

static inline bool ptlock_alloc(struct page *page)
{
	return true;
}

static inline void ptlock_free(struct page *page)
{
}

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return &page->ptl;
}
#endif /* ALLOC_SPLIT_PTLOCKS */

static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_page(*pmd));
}

static inline bool ptlock_init(struct page *page)
{
	/*
	 * prep_new_page() initialize page->private (and therefore page->ptl)
	 * with 0. Make sure nobody took it in use in between.
	 *
	 * It can happen if arch try to use slab for page table allocation:
	 * slab code uses page->slab_cache, which share storage with page->ptl.
	 */
	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
	if (!ptlock_alloc(page))
		return false;
	spin_lock_init(ptlock_ptr(page));
	return true;
}

/* Reset page->mapping so free_pages_check won't complain. */
static inline void pte_lock_deinit(struct page *page)
{
	page->mapping = NULL;
	ptlock_free(page);
}

#else	/* !USE_SPLIT_PTE_PTLOCKS */
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct page *page) { return true; }
static inline void pte_lock_deinit(struct page *page) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */

static inline void pgtable_init(void)
{
	ptlock_cache_init();
	pgtable_cache_init();
}

static inline bool pgtable_page_ctor(struct page *page)
{
	if (!ptlock_init(page))
		return false;
	__SetPageTable(page);
	inc_zone_page_state(page, NR_PAGETABLE);
	return true;
}

static inline void pgtable_page_dtor(struct page *page)
{
	pte_lock_deinit(page);
	__ClearPageTable(page);
	dec_zone_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)

#define pte_alloc(mm, pmd, address)			\
	(unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address))

#define pte_alloc_map(mm, pmd, address)			\
	(pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
	(pte_alloc(mm, pmd, address) ?			\
		NULL : pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)			\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
		NULL: pte_offset_kernel(pmd, address))
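
/*
 * Example (illustrative sketch): the standard pattern for examining a
 * pte with the split page table lock held; "addr" and "pmd" are assumed
 * to have been set up by the caller:
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *
 *	if (pte_present(*pte))
 *		...examine the mapped page under the lock...
 *	pte_unmap_unlock(pte, ptl);
 */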

#if USE_SPLIT_PMD_PTLOCKS

static struct page *pmd_to_page(pmd_t *pmd)
{
	unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	return virt_to_page((void *)((unsigned long) pmd & mask));
}

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_to_page(pmd));
}

static inline bool pgtable_pmd_page_ctor(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	page->pmd_huge_pte = NULL;
#endif
	return ptlock_init(page);
}

static inline void pgtable_pmd_page_dtor(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
#endif
	ptlock_free(page);
}

#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)

#else

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}

static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
static inline void pgtable_pmd_page_dtor(struct page *page) {}

#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)

#endif

static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl = pmd_lockptr(mm, pmd);
	spin_lock(ptl);
	return ptl;
}

/*
 * No scalability reason to split PUD locks yet, but follow the same pattern
 * as the PMD locks to make it easier if we decide to.  The VM should not be
 * considered ready to switch to split PUD locks yet; there may be places
 * which need to be converted from page_table_lock.
 */
static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
{
	return &mm->page_table_lock;
}

static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
{
	spinlock_t *ptl = pud_lockptr(mm, pud);

	spin_lock(ptl);
	return ptl;
}

extern void __init pagecache_init(void);
extern void free_area_init(unsigned long * zones_size);
extern void __init free_area_init_node(int nid, unsigned long * zones_size,
		unsigned long zone_start_pfn, unsigned long *zholes_size);
extern void free_initmem(void);

/*
 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
 * into the buddy system. The freed pages will be poisoned with pattern
 * "poison" if it's within range [0, UCHAR_MAX].
 * Return pages freed into the buddy system.
 */
extern unsigned long free_reserved_area(void *start, void *end,
					int poison, char *s);

#ifdef	CONFIG_HIGHMEM
/*
 * Free a highmem page into the buddy system, adjusting totalhigh_pages
 * and totalram_pages.
 */
extern void free_highmem_page(struct page *page);
#endif

extern void adjust_managed_page_count(struct page *page, long count);
extern void mem_init_print_info(const char *str);

extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);

/* Free the reserved page into the buddy system, so it gets managed. */
static inline void __free_reserved_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}

static inline void free_reserved_page(struct page *page)
{
	__free_reserved_page(page);
	adjust_managed_page_count(page, 1);
}

static inline void mark_page_reserved(struct page *page)
{
	SetPageReserved(page);
	adjust_managed_page_count(page, -1);
}

/*
 * Default method to free all the __init memory into the buddy system.
 * The freed pages will be poisoned with pattern "poison" if it's within
 * range [0, UCHAR_MAX].
 * Return pages freed into the buddy system.
 */
static inline unsigned long free_initmem_default(int poison)
{
	extern char __init_begin[], __init_end[];

	return free_reserved_area(&__init_begin, &__init_end,
				  poison, "unused kernel");
}

static inline unsigned long get_num_physpages(void)
{
	int nid;
	unsigned long phys_pages = 0;

	for_each_online_node(nid)
		phys_pages += node_present_pages(nid);

	return phys_pages;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
 * zones, allocate the backing mem_map and account for memory holes in a more
 * architecture independent manner. This is a substitute for creating the
 * zone_sizes[] and zholes_size[] arrays and passing them to
 * free_area_init_node()
 *
 * An architecture is expected to register range of page frames backed by
 * physical memory with memblock_add[_node]() before calling
 * free_area_init_nodes() passing in the PFN each zone ends at. At a basic
 * usage, an architecture is expected to do something like
 *
 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
 * 							 max_highmem_pfn};
 * for_each_valid_physical_page_range()
 * 	memblock_add_node(base, size, nid)
 * free_area_init_nodes(max_zone_pfns);
 *
 * free_bootmem_with_active_regions() calls free_bootmem_node() for each
 * registered physical page range.  Similarly
 * sparse_memory_present_with_active_regions() calls memory_present() for
 * each range when SPARSEMEM is enabled.
 */
extern void free_area_init_nodes(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
						unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
						unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
						unsigned long max_low_pfn);
extern void sparse_memory_present_with_active_regions(int nid);

#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
    !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
static inline int __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state)
{
	return 0;
}
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
/* there is a per-arch backend function. */
extern int __meminit __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state);
#endif

#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
void zero_resv_unavail(void);
#else
static inline void zero_resv_unavail(void) {}
#endif

extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
		enum memmap_context, struct vmem_altmap *);
extern void setup_per_zone_wmarks(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
extern void show_mem(unsigned int flags, nodemask_t *nodemask);
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
#ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
extern unsigned long arch_reserved_kernel_pages(void);
#endif

extern __printf(3, 4)
void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);

extern void setup_per_cpu_pageset(void);

extern void zone_pcp_update(struct zone *zone);
extern void zone_pcp_reset(struct zone *zone);

/* page_alloc.c */
extern int min_free_kbytes;
extern int watermark_scale_factor;

/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);

/* interval_tree.c */
void vma_interval_tree_insert(struct vm_area_struct *node,
			      struct rb_root_cached *root);
void vma_interval_tree_insert_after(struct vm_area_struct *node,
				    struct vm_area_struct *prev,
				    struct rb_root_cached *root);
void vma_interval_tree_remove(struct vm_area_struct *node,
			      struct rb_root_cached *root);
struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
				unsigned long start, unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
				unsigned long start, unsigned long last);

#define vma_interval_tree_foreach(vma, root, start, last)		\
	for (vma = vma_interval_tree_iter_first(root, start, last);	\
	     vma; vma = vma_interval_tree_iter_next(vma, start, last))

void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
				   struct rb_root_cached *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
				   struct rb_root_cached *root);
struct anon_vma_chain *
anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
				  unsigned long start, unsigned long last);
struct anon_vma_chain *anon_vma_interval_tree_iter_next(
	struct anon_vma_chain *node, unsigned long start, unsigned long last);
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
#endif

#define anon_vma_interval_tree_foreach(avc, root, start, last)		 \
	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
2216
2217
2218extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
2219extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
2220 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
2221 struct vm_area_struct *expand);
2222static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
2223 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
2224{
2225 return __vma_adjust(vma, start, end, pgoff, insert, NULL);
2226}
2227extern struct vm_area_struct *vma_merge(struct mm_struct *,
2228 struct vm_area_struct *prev, unsigned long addr, unsigned long end,
2229 unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
2230 struct mempolicy *, struct vm_userfaultfd_ctx);
2231extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
2232extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
2233 unsigned long addr, int new_below);
2234extern int split_vma(struct mm_struct *, struct vm_area_struct *,
2235 unsigned long addr, int new_below);
2236extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
2237extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
2238 struct rb_node **, struct rb_node *);
2239extern void unlink_file_vma(struct vm_area_struct *);
2240extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
2241 unsigned long addr, unsigned long len, pgoff_t pgoff,
2242 bool *need_rmap_locks);
2243extern void exit_mmap(struct mm_struct *);
2244
2245static inline int check_data_rlimit(unsigned long rlim,
2246 unsigned long new,
2247 unsigned long start,
2248 unsigned long end_data,
2249 unsigned long start_data)
2250{
2251 if (rlim < RLIM_INFINITY) {
2252 if (((new - start) + (end_data - start_data)) > rlim)
2253 return -ENOSPC;
2254 }
2255
2256 return 0;
2257}
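
/*
 * Sketch of the intended use (modelled on the brk() path; "newbrk" is
 * an assumed local): reject a data-segment expansion that would push
 * the task past RLIMIT_DATA.
 *
 *	if (check_data_rlimit(rlimit(RLIMIT_DATA), newbrk, mm->start_brk,
 *			      mm->end_data, mm->start_data))
 *		return -ENOSPC;
 */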

extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);

extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);
extern struct file *get_task_exe_file(struct task_struct *task);

extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);

extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
				   const struct vm_special_mapping *sm);
extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags,
				   const struct vm_special_mapping *spec);
/* This is an obsolete alternative to _install_special_mapping. */
extern int install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags, struct page **pages);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long mmap_region(struct file *file, unsigned long addr,
	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
	struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
	struct list_head *uf);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
		     struct list_head *uf);

static inline unsigned long
do_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	unsigned long pgoff, unsigned long *populate,
	struct list_head *uf)
{
	return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf);
}

#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
			 int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
{
	/* Ignore errors */
	(void) __mm_populate(addr, len, 1);
}
#else
static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif

/* These take the mm semaphore themselves */
extern int __must_check vm_brk(unsigned long, unsigned long);
extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
					  unsigned long, unsigned long,
					  unsigned long, unsigned long);
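
/*
 * Illustrative sketch (driver-style caller; "file" and "size" are
 * assumptions): vm_mmap() maps a file into current->mm and returns
 * either an address or a negative errno encoded in the value. Note
 * that the final argument is a byte offset, not a page offset.
 *
 *	unsigned long addr;
 *
 *	addr = vm_mmap(file, 0, size, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, 0);
 *	if (IS_ERR_VALUE(addr))
 *		return (int)addr;
 *	...
 *	vm_munmap(addr, size);
 */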

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
};

extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);

/*
 * Search for an unmapped address range.
 *
 * We are looking for a range that:
 * - does not intersect with any VMA;
 * - is contained within the [low_limit, high_limit) interval;
 * - is at least the desired size;
 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask).
 */
static inline unsigned long
vm_unmapped_area(struct vm_unmapped_area_info *info)
{
	if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
		return unmapped_area_topdown(info);
	else
		return unmapped_area(info);
}
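
/*
 * Sketch of a typical arch_get_unmapped_area()-style caller; the limit
 * values are illustrative assumptions. Setting VM_UNMAPPED_AREA_TOPDOWN
 * in flags selects the top-down search instead.
 *
 *	struct vm_unmapped_area_info info;
 *
 *	info.flags = 0;
 *	info.length = len;
 *	info.low_limit = mm->mmap_base;
 *	info.high_limit = TASK_SIZE;
 *	info.align_mask = 0;
 *	info.align_offset = 0;
 *	return vm_unmapped_area(&info);
 */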

/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
				       loff_t lstart, loff_t lend);
extern void truncate_inode_pages_final(struct address_space *);

/* generic vm_area_ops exported for stackable file systems */
extern vm_fault_t filemap_fault(struct vm_fault *vmf);
extern void filemap_map_pages(struct vm_fault *vmf,
		pgoff_t start_pgoff, pgoff_t end_pgoff);
extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);

/* mm/page-writeback.c */
int __must_check write_one_page(struct page *page);
void task_dirty_inc(struct task_struct *tsk);

/* readahead.c */
#define VM_MAX_READAHEAD	128	/* kbytes */
#define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */

int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read);

void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra,
			       struct file *filp,
			       pgoff_t offset,
			       unsigned long size);

void page_cache_async_readahead(struct address_space *mapping,
				struct file_ra_state *ra,
				struct file *filp,
				struct page *pg,
				pgoff_t offset,
				unsigned long size);

extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);

/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
extern int expand_downwards(struct vm_area_struct *vma,
		unsigned long address);
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
  #define expand_upwards(vma, address) (0)
#endif

/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
					     struct vm_area_struct **pprev);

/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none.  Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma = find_vma(mm, start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}
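
/*
 * Sketch (caller assumed to hold mmap_sem): find_vma_intersection()
 * makes an emptiness check on an address range a one-liner.
 *
 *	if (find_vma_intersection(mm, start, end))
 *		return -EEXIST;		// something is already mapped here
 */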

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)
			vm_start = 0;	/* underflow: clamp to bottom */
	}
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)
			vm_end = -PAGE_SIZE;	/* overflow: clamp to top */
	}
	return vm_end;
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
				unsigned long vm_start, unsigned long vm_end)
{
	struct vm_area_struct *vma = find_vma(mm, vm_start);

	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
		vma = NULL;

	return vma;
}

static inline bool range_in_vma(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	return (vma && vma->vm_start <= start && end <= vma->vm_end);
}
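
/*
 * Sketch: combined with find_vma(), range_in_vma() verifies that a
 * whole user range sits inside a single VMA (no hole, no boundary
 * crossing):
 *
 *	vma = find_vma(mm, start);
 *	if (!range_in_vma(vma, start, end))
 *		return -EFAULT;
 */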

#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
#else
static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(0);
}
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
#endif

#ifdef CONFIG_NUMA_BALANCING
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
#endif

struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn, pgprot_t pgprot);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			pfn_t pfn);
vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
		unsigned long addr, pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);

static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
				unsigned long addr, struct page *page)
{
	int err = vm_insert_page(vma, addr, page);

	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}
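
/*
 * Sketch of the intended call site: a ->fault() handler backing the
 * faulting address with a driver-owned page ("my_lookup_page" is an
 * assumed helper):
 *
 *	static vm_fault_t my_fault(struct vm_fault *vmf)
 *	{
 *		struct page *page = my_lookup_page(vmf->pgoff);
 *
 *		if (!page)
 *			return VM_FAULT_SIGBUS;
 *		return vmf_insert_page(vmf->vma, vmf->address, page);
 *	}
 */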

static inline vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma,
				unsigned long addr, pfn_t pfn)
{
	int err = vm_insert_mixed(vma, addr, pfn);

	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma,
			unsigned long addr, unsigned long pfn)
{
	int err = vm_insert_pfn(vma, addr, pfn);

	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static inline vm_fault_t vmf_error(int err)
{
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}
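
/*
 * Sketch: vmf_error() converts an errno from a helper into a fault
 * code inside a ->fault() handler ("my_prepare" is an assumed helper):
 *
 *	err = my_prepare(vmf);
 *	if (err)
 *		return vmf_error(err);
 */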

struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int foll_flags,
			      unsigned int *page_mask);

static inline struct page *follow_page(struct vm_area_struct *vma,
		unsigned long address, unsigned int foll_flags)
{
	unsigned int unused_page_mask;
	return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
}

#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_TOUCH	0x02	/* mark page accessed */
#define FOLL_GET	0x04	/* do get_page on page */
#define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
#define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
#define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
				 * and return without waiting upon it */
#define FOLL_POPULATE	0x40	/* fault in page */
#define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
#define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
#define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
#define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
#define FOLL_TRIED	0x800	/* a retry, previous pass started an IO */
#define FOLL_MLOCK	0x1000	/* lock present pages */
#define FOLL_REMOTE	0x2000	/* we are working on non-current tsk/mm */
#define FOLL_COW	0x4000	/* internal GUP flag */
#define FOLL_ANON	0x8000	/* don't do file mappings */
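
/*
 * Sketch (assumed buffer bounds): pinning user pages for write access
 * with get_user_pages(), declared earlier in this header; mmap_sem
 * must be held across the call, and each returned page must later be
 * released with put_page().
 *
 *	struct page *pages[16];
 *	long got;
 *
 *	down_read(&current->mm->mmap_sem);
 *	got = get_user_pages(start, 16, FOLL_WRITE, pages, NULL);
 *	up_read(&current->mm->mmap_sem);
 */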

static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
{
	if (vm_fault & VM_FAULT_OOM)
		return -ENOMEM;
	if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
		return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
	if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
		return -EFAULT;
	return 0;
}
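
/*
 * Sketch: a GUP-style caller converting a fault result to an errno
 * ("fault_flags" and "foll_flags" are assumed locals):
 *
 *	vm_fault_t ret = handle_mm_fault(vma, address, fault_flags);
 *
 *	if (ret & VM_FAULT_ERROR)
 *		return vm_fault_to_errno(ret, foll_flags);
 */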

typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
			void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
			       unsigned long size, pte_fn_t fn, void *data);
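
/*
 * Sketch of a pte_fn_t callback matching the typedef above (counting
 * present PTEs is an illustrative assumption, not an existing user):
 *
 *	static int count_present(pte_t *pte, pgtable_t token,
 *				 unsigned long addr, void *data)
 *	{
 *		if (pte_present(*pte))
 *			(*(unsigned long *)data)++;
 *		return 0;	// nonzero aborts the walk with that error
 *	}
 *
 *	unsigned long n = 0;
 *	int err = apply_to_page_range(mm, start, size, count_present, &n);
 */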

#ifdef CONFIG_PAGE_POISONING
extern bool page_poisoning_enabled(void);
extern void kernel_poison_pages(struct page *page, int numpages, int enable);
#else
static inline bool page_poisoning_enabled(void) { return false; }
static inline void kernel_poison_pages(struct page *page, int numpages,
					int enable) { }
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
extern bool _debug_pagealloc_enabled;
extern void __kernel_map_pages(struct page *page, int numpages, int enable);

static inline bool debug_pagealloc_enabled(void)
{
	return _debug_pagealloc_enabled;
}

static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!debug_pagealloc_enabled())
		return;

	__kernel_map_pages(page, numpages, enable);
}
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif	/* CONFIG_HIBERNATION */
#else	/* !CONFIG_DEBUG_PAGEALLOC */
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
#endif	/* CONFIG_HIBERNATION */
static inline bool debug_pagealloc_enabled(void)
{
	return false;
}
#endif	/* CONFIG_DEBUG_PAGEALLOC */

#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
extern int in_gate_area_no_mm(unsigned long addr);
extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
#else
static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}
static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}
#endif	/* __HAVE_ARCH_GATE_AREA */

extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);

#ifdef CONFIG_SYSCTL
extern int sysctl_drop_caches;
int drop_caches_sysctl_handler(struct ctl_table *, int,
			       void __user *, size_t *, loff_t *);
#endif

void drop_slab(void);
void drop_slab_node(int nid);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char * arch_vma_name(struct vm_area_struct *vma);
void print_vma_addr(char *prefix, unsigned long rip);

void *sparse_buffer_alloc(unsigned long size);
struct page *sparse_mem_map_populate(unsigned long pnum, int nid,
		struct vmem_altmap *altmap);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
struct vmem_altmap;
void *vmemmap_alloc_block_buf(unsigned long size, int node);
void *altmap_alloc_block_buf(unsigned long size, struct vmem_altmap *altmap);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
			       int node);
int vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap);
#endif
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
				  unsigned long nr_pages);

enum mf_flags {
	MF_COUNT_INCREASED = 1 << 0,
	MF_ACTION_REQUIRED = 1 << 1,
	MF_MUST_KILL = 1 << 2,
	MF_SOFT_OFFLINE = 1 << 3,
};
extern int memory_failure(unsigned long pfn, int flags);
extern void memory_failure_queue(unsigned long pfn, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int get_hwpoison_page(struct page *page);
#define put_hwpoison_page(page)	put_page(page)
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(struct page *page, int flags);

/*
 * Error handlers for various types of pages.
 */
enum mf_result {
	MF_IGNORED,	/* Error: cannot be handled */
	MF_FAILED,	/* Error: handling failed */
	MF_DELAYED,	/* Will be handled later */
	MF_RECOVERED,	/* Successfully recovered */
};

enum mf_action_page_type {
	MF_MSG_KERNEL,
	MF_MSG_KERNEL_HIGH_ORDER,
	MF_MSG_SLAB,
	MF_MSG_DIFFERENT_COMPOUND,
	MF_MSG_POISONED_HUGE,
	MF_MSG_HUGE,
	MF_MSG_FREE_HUGE,
	MF_MSG_NON_PMD_HUGE,
	MF_MSG_UNMAP_FAILED,
	MF_MSG_DIRTY_SWAPCACHE,
	MF_MSG_CLEAN_SWAPCACHE,
	MF_MSG_DIRTY_MLOCKED_LRU,
	MF_MSG_CLEAN_MLOCKED_LRU,
	MF_MSG_DIRTY_UNEVICTABLE_LRU,
	MF_MSG_CLEAN_UNEVICTABLE_LRU,
	MF_MSG_DIRTY_LRU,
	MF_MSG_CLEAN_LRU,
	MF_MSG_TRUNCATED_LRU,
	MF_MSG_BUDDY,
	MF_MSG_BUDDY_2ND,
	MF_MSG_DAX,
	MF_MSG_UNKNOWN,
};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
			    unsigned long addr_hint,
			    unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src,
				unsigned long addr_hint,
				struct vm_area_struct *vma,
				unsigned int pages_per_huge_page);
extern long copy_huge_page_from_user(struct page *dst_page,
				const void __user *usr_src,
				unsigned int pages_per_huge_page,
				bool allow_pagefault);
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

extern struct page_ext_operations debug_guardpage_ops;

#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
extern bool _debug_guardpage_enabled;

static inline unsigned int debug_guardpage_minorder(void)
{
	return _debug_guardpage_minorder;
}

static inline bool debug_guardpage_enabled(void)
{
	return _debug_guardpage_enabled;
}

static inline bool page_is_guard(struct page *page)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return false;

	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return false;

	return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool debug_guardpage_enabled(void) { return false; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif	/* CONFIG_DEBUG_PAGEALLOC */

#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else
static inline void setup_nr_node_ids(void) {}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */