/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
#include <linux/page_ref.h>
#include <linux/memremap.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;
struct bdi_writeback;

void init_mm_internals(void);

#ifndef CONFIG_NEED_MULTIPLE_NODES	/* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;

static inline void set_max_mapnr(unsigned long limit)
{
	max_mapnr = limit;
}
#else
static inline void set_max_mapnr(unsigned long limit) { }
#endif

extern unsigned long totalram_pages;
extern void * high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
extern const int mmap_rnd_bits_min;
extern const int mmap_rnd_bits_max;
extern int mmap_rnd_bits __read_mostly;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
extern const int mmap_rnd_compat_bits_min;
extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __read_mostly;
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#ifndef __pa_symbol
#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

#ifndef page_to_virt
#define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
#endif

#ifndef lm_alias
#define lm_alias(x)	__va(__pa_symbol(x))
#endif

/*
 * To prevent common memory management code establishing
 * a zero page mapping on a read fault.
 * This macro should be defined within <asm/pgtable.h>.
 * s390 does this to prevent multiplexing of hardware bits
 * related to the physical page in case of virtualization.
 */
#ifndef mm_forbids_zeropage
#define mm_forbids_zeropage(X)	(0)
#endif

/*
 * On some architectures it is expensive to call memset() for small sizes.
 * Those architectures should provide their own implementation of "struct page"
 * zeroing by defining this macro in <asm/pgtable.h>.
 */
#ifndef mm_zero_struct_page
#define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
#endif

/*
 * Default maximum number of active map areas, this limits the number of vmas
 * per mm struct. Users can overwrite this number by sysctl but there is a
 * problem.
 *
 * When a program's coredump is generated as ELF format, a section is created
 * per vma. In ELF, the number of sections is represented in unsigned short.
 * This means the number of sections should be smaller than 65535 at coredump.
 * Because the kernel adds some informative sections to an image of a program
 * when generating a coredump, we need some margin. The number of extra
 * sections is 1-3 now and depends on arch. We use "5" as a safe margin here.
 *
 * ELF extended numbering allows more than 65535 sections, so the 16-bit
 * bound is not a hard limit any more, although some userspace tools can be
 * surprised by that.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

extern int sysctl_max_map_count;

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;

extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
				    size_t *, loff_t *);
extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
				     size_t *, loff_t *);

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
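
/*
 * Example (illustrative sketch, not part of the original header): rounding
 * a byte count up to page granularity with PAGE_ALIGN() before handing it
 * to the page allocator.  The length value is hypothetical.
 *
 *	unsigned long len = 5000;
 *	unsigned long alen = PAGE_ALIGN(len);	// 8192 with 4K pages
 *	unsigned long addr = __get_free_pages(GFP_KERNEL, get_order(alen));
 *
 *	if (addr && PAGE_ALIGNED(addr))
 *		free_pages(addr, get_order(alen));
 */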

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

extern struct kmem_cache *vm_area_cachep;

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 * When changing, update also include/trace/events/mmflags.h
 */
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
#define VM_UFFD_WP	0x00001000	/* wrprotect pages tracking */

#define VM_LOCKED	0x00002000
#define VM_IO           0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_LOCKONFAULT	0x00080000	/* Lock the pages covered when they are faulted in */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_SYNC		0x00800000	/* Synchronous page faults */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_WIPEONFORK	0x02000000	/* Wipe VMA contents in child. */
#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
#else
# define VM_SOFTDIRTY	0
#endif

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
#define VM_HIGH_ARCH_BIT_0	32	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_1	33	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_2	34	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3	35	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_4	36	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
#define VM_HIGH_ARCH_4	BIT(VM_HIGH_ARCH_BIT_4)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#if defined (CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS)
# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0	VM_HIGH_ARCH_0	/* A protection key is a 4-bit value */
# define VM_PKEY_BIT1	VM_HIGH_ARCH_1
# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
#endif
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_SPARC64)
# define VM_SPARC_ADI	VM_ARCH_1	/* Uses ADI tag for access control */
# define VM_ARCH_CLEAR	VM_SPARC_ADI
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif

#if defined(CONFIG_X86_INTEL_MPX)
/* MPX specific bounds table or bounds directory */
# define VM_MPX		VM_HIGH_ARCH_4
#else
# define VM_MPX		VM_NONE
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK	VM_GROWSUP
#else
#define VM_STACK	VM_GROWSDOWN
#endif

#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)

/*
 * Special vmas that are non-mergable, non-mlock()able.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

/* This mask defines which mm->def_flags a process can inherit its parent */
#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE

/* This mask is used to clear all the VMA flags used by mlock */
#define VM_LOCKED_CLEAR_MASK	(~(VM_LOCKED | VM_LOCKONFAULT))

/* Arch-specific flags to clear when updating VM flags on protection change */
#ifndef VM_ARCH_CLEAR
# define VM_ARCH_CLEAR	VM_NONE
#endif
#define VM_FLAGS_CLEAR	(ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];

#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
#define FAULT_FLAG_MKWRITE	0x02	/* Fault was mkwrite of existing pte */
#define FAULT_FLAG_ALLOW_RETRY	0x04	/* Retry fault if blocking */
#define FAULT_FLAG_RETRY_NOWAIT	0x08	/* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE	0x10	/* The fault task is in SIGKILL killable region */
#define FAULT_FLAG_TRIED	0x20	/* Second try */
#define FAULT_FLAG_USER		0x40	/* The fault originated in userspace */
#define FAULT_FLAG_REMOTE	0x80	/* faulting for non current tsk/mm */
#define FAULT_FLAG_INSTRUCTION  0x100	/* The fault was during an instruction fetch */

#define FAULT_FLAG_TRACE \
	{ FAULT_FLAG_WRITE,		"WRITE" }, \
	{ FAULT_FLAG_MKWRITE,		"MKWRITE" }, \
	{ FAULT_FLAG_ALLOW_RETRY,	"ALLOW_RETRY" }, \
	{ FAULT_FLAG_RETRY_NOWAIT,	"RETRY_NOWAIT" }, \
	{ FAULT_FLAG_KILLABLE,		"KILLABLE" }, \
	{ FAULT_FLAG_TRIED,		"TRIED" }, \
	{ FAULT_FLAG_USER,		"USER" }, \
	{ FAULT_FLAG_REMOTE,		"REMOTE" }, \
	{ FAULT_FLAG_INSTRUCTION,	"INSTRUCTION" }

/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * MM layer fills up gfp_mask for page allocations but fault handler might
 * alter it if its implementation requires a different allocation context.
 *
 * pgoff should be used in favour of virtual_address, if possible.
 */
struct vm_fault {
	struct vm_area_struct *vma;	/* Target VMA */
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	gfp_t gfp_mask;			/* gfp mask to be used for allocations */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	unsigned long address;		/* Faulting virtual address */
	pmd_t *pmd;			/* Pointer to pmd entry matching
					 * the 'address' */
	pud_t *pud;			/* Pointer to pud entry matching
					 * the 'address'
					 */
	pte_t orig_pte;			/* Value of PTE at the time of fault */

	struct page *cow_page;		/* Page handler may use for COW fault */
	struct mem_cgroup *memcg;	/* Cgroup cow_page belongs to */
	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
	/* These three entries are valid only while holding ptl lock */
	pte_t *pte;			/* Pointer to pte entry matching
					 * the 'address'. NULL if the page
					 * table hasn't been allocated.
					 */
	spinlock_t *ptl;		/* Page table lock.
					 * Protects pte page table if 'pte'
					 * is not NULL, otherwise pmd.
					 */
	pgtable_t prealloc_pte;		/* Pre-allocated pte page table.
					 * vm_ops->map_pages() calls
					 * alloc_set_pte() from atomic context.
					 * do_fault_around() pre-allocates
					 * page table to avoid allocation from
					 * atomic context.
					 */
};

/* page entry size for vm->huge_fault() */
enum page_entry_size {
	PE_SIZE_PTE = 0,
	PE_SIZE_PMD,
	PE_SIZE_PUD,
};

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*split)(struct vm_area_struct * area, unsigned long addr);
	int (*mremap)(struct vm_area_struct * area);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf,
			enum page_entry_size pe_size);
	void (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct * area);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs that can switch between memory and hardware
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
#endif
	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr.  This is useful if the default behavior
	 * (using pte_page()) would not find the correct page.
	 */
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};

struct mmu_gather;
struct inode;

#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline int pmd_devmap(pmd_t pmd)
{
	return 0;
}
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

/*
 * FIXME: take this include out, include page-flags.h in
 * files which need it (119 of them)
 */
#include <linux/page-flags.h>
#include <linux/huge_mm.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
	return page_ref_dec_and_test(page);
}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 * This can be called when MMU is off so it must not access
 * any of the virtual mappings.
 */
static inline int get_page_unless_zero(struct page *page)
{
	return page_ref_add_unless(page, 1, 0);
}
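
/*
 * Example (illustrative sketch, not part of the original header): the
 * classic speculative-reference pattern built on the helpers above --
 * take a reference only if the page still has users, and drop it when
 * done.  'page' is assumed to come from a lockless lookup.
 *
 *	if (!get_page_unless_zero(page))
 *		return NULL;		// page was already being freed
 *	... inspect the page; it cannot be freed while we hold the ref ...
 *	put_page(page);
 */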

extern int page_is_ram(unsigned long pfn);

enum {
	REGION_INTERSECTS,
	REGION_DISJOINT,
	REGION_MIXED,
};

int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
		      unsigned long desc);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
static inline bool is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return false;
#endif
}
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
static inline void *kvmalloc(size_t size, gfp_t flags)
{
	return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
	return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
static inline void *kvzalloc(size_t size, gfp_t flags)
{
	return kvmalloc(size, flags | __GFP_ZERO);
}

static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
	/* Refuse requests whose n * size multiplication would overflow. */
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;

	return kvmalloc(n * size, flags);
}

extern void kvfree(const void *addr);
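
/*
 * Example (illustrative sketch, not part of the original header): a
 * typical kvmalloc_array()/kvfree() pairing.  The allocation falls back
 * to vmalloc when the request is too large for kmalloc, and kvfree()
 * releases memory from either allocator.  'nr_items' and 'struct item'
 * are hypothetical.
 *
 *	struct item *tbl = kvmalloc_array(nr_items, sizeof(*tbl), GFP_KERNEL);
 *	if (!tbl)
 *		return -ENOMEM;
 *	... fill and use tbl[0..nr_items-1] ...
 *	kvfree(tbl);
 */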

static inline atomic_t *compound_mapcount_ptr(struct page *page)
{
	return &page[1].compound_mapcount;
}

static inline int compound_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(!PageCompound(page), page);
	page = compound_head(page);
	return atomic_read(compound_mapcount_ptr(page)) + 1;
}

/*
 * The atomic page->_mapcount, starts from -1: so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */
static inline void page_mapcount_reset(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

int __page_mapcount(struct page *page);

static inline int page_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(PageSlab(page), page);

	if (unlikely(PageCompound(page)))
		return __page_mapcount(page);
	return atomic_read(&page->_mapcount) + 1;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int total_mapcount(struct page *page);
int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
#else
static inline int total_mapcount(struct page *page)
{
	return page_mapcount(page);
}
static inline int page_trans_huge_mapcount(struct page *page,
					   int *total_mapcount)
{
	int mapcount = page_mapcount(page);
	if (total_mapcount)
		*total_mapcount = mapcount;
	return mapcount;
}
#endif

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);

	return compound_head(page);
}

void __put_page(struct page *page);

void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);

/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a compound page.
 */
typedef void compound_page_dtor(struct page *);

/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
enum compound_dtor_id {
	NULL_COMPOUND_DTOR,
	COMPOUND_PAGE_DTOR,
#ifdef CONFIG_HUGETLB_PAGE
	HUGETLB_PAGE_DTOR,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	TRANSHUGE_PAGE_DTOR,
#endif
	NR_COMPOUND_DTORS,
};
extern compound_page_dtor * const compound_page_dtors[];

static inline void set_compound_page_dtor(struct page *page,
		enum compound_dtor_id compound_dtor)
{
	VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
	page[1].compound_dtor = compound_dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
	return compound_page_dtors[page[1].compound_dtor];
}

static inline unsigned int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return page[1].compound_order;
}

static inline void set_compound_order(struct page *page, unsigned int order)
{
	page[1].compound_order = order;
}

void free_compound_page(struct page *page);
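
/*
 * Example (illustrative sketch, not part of the original header): deriving
 * the size of a compound page from its order.  For a 2MB THP on x86-64,
 * compound_order() returns 9, i.e. 512 base pages.
 *
 *	struct page *head = compound_head(page);
 *	unsigned int order = compound_order(head);
 *	unsigned long nr_pages = 1UL << order;
 *	unsigned long bytes = nr_pages << PAGE_SHIFT;
 */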

#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case, we do always want
 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}

int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
		struct page *page);
int finish_fault(struct vm_fault *vmf);
int finish_mkwrite_fault(struct vm_fault *vmf);
#endif

/*
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count:
 *   page_count() == 0 means the page is free;
 *   page_count() > 0  means the page has been allocated.
 *
 * A pagecache page contains an opaque `private' member, which belongs to
 * the page's address_space.  Usually, this is the address of a circular
 * list of the page's disk buffers.  PG_private must be set to tell the VM
 * to call into the filesystem to release these pages.
 *
 * All pagecache pages may be subject to I/O; the page reference count is
 * what decides when such a page can be reclaimed.
 */

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */

/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF) ? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF) ? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}
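
/*
 * Example (illustrative sketch, not part of the original header): zone,
 * node and section identifiers live in the upper bits of page->flags and
 * are extracted with the shift/mask pairs defined above, e.g.:
 *
 *	enum zone_type zt = (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 *	int nid          = (page->flags >> NODES_PGSHIFT) & NODES_MASK;
 */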

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_zone_device_page(const struct page *page)
{
	return page_zonenum(page) == ZONE_DEVICE;
}
#else
static inline bool is_zone_device_page(const struct page *page)
{
	return false;
}
#endif

#if defined(CONFIG_DEVICE_PRIVATE) || defined(CONFIG_DEVICE_PUBLIC)
void put_zone_device_private_or_public_page(struct page *page);
DECLARE_STATIC_KEY_FALSE(device_private_key);
#define IS_HMM_ENABLED static_branch_unlikely(&device_private_key)
static inline bool is_device_private_page(const struct page *page);
static inline bool is_device_public_page(const struct page *page);
#else /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
static inline void put_zone_device_private_or_public_page(struct page *page)
{
}
#define IS_HMM_ENABLED 0
static inline bool is_device_private_page(const struct page *page)
{
	return false;
}
static inline bool is_device_public_page(const struct page *page)
{
	return false;
}
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */

static inline void get_page(struct page *page)
{
	page = compound_head(page);
	/*
	 * Getting a normal page or the head of a compound page
	 * requires to already have an elevated page->_refcount.
	 */
	VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
	page_ref_inc(page);
}

static inline void put_page(struct page *page)
{
	page = compound_head(page);

	/*
	 * For private device pages we need to catch the refcount transition
	 * from 2 to 1: when the refcount reaches one it means the page is
	 * free and we need to inform the device driver through a callback.
	 * See include/linux/memremap.h and HMM for details.
	 */
	if (IS_HMM_ENABLED && unlikely(is_device_private_page(page) ||
	    unlikely(is_device_public_page(page)))) {
		put_zone_device_private_or_public_page(page);
		return;
	}

	if (put_page_testzero(page))
		__put_page(page);
}
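
/*
 * Example (illustrative sketch, not part of the original header): a
 * plain pin/use/unpin sequence.  Both helpers resolve tail pages to the
 * head page first, so any sub-page of a compound page may be passed.
 *
 *	get_page(page);
 *	... the page cannot be freed while the extra reference is held ...
 *	put_page(page);
 */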

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif

/*
 * The identification function is mainly used by the buddy allocator for
 * determining if two pages could be buddies. We are not really identifying
 * the zone since we could be using the section number id if we do not have
 * node id available in page flags.
 * We only guarantee that it will return the same value for two combinable
 * pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
	return zone->node;
#else
	return 0;
#endif
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	struct page *p = (struct page *)page;

	return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

static inline int cpupid_to_pid(int cpupid)
{
	return cpupid & LAST__PID_MASK;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}

static inline int cpupid_to_nid(int cpupid)
{
	return cpu_to_node(cpupid_to_cpu(cpupid));
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}

static inline bool cpupid_cpu_unset(int cpupid)
{
	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
}

static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
{
	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
}

#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}

static inline int page_cpupid_last(struct page *page)
{
	return page->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int page_cpupid_last(struct page *page)
{
	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}

extern int page_cpupid_xchg_last(struct page *page, int cpupid);

static inline void page_cpupid_reset_last(struct page *page)
{
	page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
#else /* !CONFIG_NUMA_BALANCING */
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return page_to_nid(page);
}

static inline int page_cpupid_last(struct page *page)
{
	return page_to_nid(page);
}

static inline int cpupid_to_nid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_pid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return -1;
}

static inline int cpu_pid_to_cpupid(int nid, int pid)
{
	return -1;
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return 1;
}

static inline void page_cpupid_reset_last(struct page *page)
{
}

static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

static inline pg_data_t *page_pgdat(const struct page *page)
{
	return NODE_DATA(page_to_nid(page));
}

#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#ifdef SECTION_IN_PAGE_FLAGS
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}

#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return page->mem_cgroup;
}
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return READ_ONCE(page->mem_cgroup);
}
#else
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return NULL;
}
#endif

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(const struct page *page)
{
	return page_to_virt(page);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
static inline void *page_address(const struct page *page)
{
	return page->virtual;
}
static inline void set_page_address(struct page *page, void *address)
{
	page->virtual = address;
}
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

extern void *page_rmapping(struct page *page);
extern struct anon_vma *page_anon_vma(struct page *page);
extern struct address_space *page_mapping(struct page *page);

extern struct address_space *__page_file_mapping(struct page *);

static inline
struct address_space *page_file_mapping(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_mapping(page);

	return page->mapping;
}

extern pgoff_t __page_file_index(struct page *page);

/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use swp_offset(->private)
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_index(page);
	return page->index;
}

bool page_mapped(struct page *page);
struct address_space *page_mapping(struct page *page);
struct address_space *page_mapping_file(struct page *page);

/*
 * Return true only if the page has been allocated with
 * ALLOC_NO_WATERMARKS and the low watermark was not
 * met implying that the system is under some pressure.
 */
static inline bool page_is_pfmemalloc(struct page *page)
{
	/*
	 * Page index cannot be this large so this must be
	 * a pfmemalloc page.
	 */
	return page->index == -1UL;
}

/*
 * Only to be called by the page allocator on a freshly allocated
 * page.
 */
static inline void set_page_pfmemalloc(struct page *page)
{
	page->index = -1UL;
}

static inline void clear_page_pfmemalloc(struct page *page)
{
	page->index = 0;
}

/*
 * Different kinds of faults, as returned by handle_mm_fault().
 * Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 */
#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
#define VM_FAULT_HWPOISON_LARGE 0x0020	/* Hit poisoned large page. Index encoded in upper bits */
#define VM_FAULT_SIGSEGV 0x0040

#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
#define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */
#define VM_FAULT_FALLBACK 0x0800	/* huge page fault failed, fall back to small */
#define VM_FAULT_DONE_COW   0x1000	/* ->fault has fully handled COW */
#define VM_FAULT_NEEDDSYNC  0x2000	/* ->fault did not modify page tables
					 * and needs fsync() to complete (for
					 * synchronous page faults in DAX) */

#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
			 VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
			 VM_FAULT_FALLBACK)

#define VM_FAULT_RESULT_TRACE \
	{ VM_FAULT_OOM,			"OOM" }, \
	{ VM_FAULT_SIGBUS,		"SIGBUS" }, \
	{ VM_FAULT_MAJOR,		"MAJOR" }, \
	{ VM_FAULT_WRITE,		"WRITE" }, \
	{ VM_FAULT_HWPOISON,		"HWPOISON" }, \
	{ VM_FAULT_HWPOISON_LARGE,	"HWPOISON_LARGE" }, \
	{ VM_FAULT_SIGSEGV,		"SIGSEGV" }, \
	{ VM_FAULT_NOPAGE,		"NOPAGE" }, \
	{ VM_FAULT_LOCKED,		"LOCKED" }, \
	{ VM_FAULT_RETRY,		"RETRY" }, \
	{ VM_FAULT_FALLBACK,		"FALLBACK" }, \
	{ VM_FAULT_DONE_COW,		"DONE_COW" }, \
	{ VM_FAULT_NEEDDSYNC,		"NEEDDSYNC" }

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)

/*
 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
 */
extern void pagefault_out_of_memory(void);

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

/*
 * Flags passed to show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);

extern bool can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct address_space *check_mapping;	/* Check page->mapping if set */
	pgoff_t	first_index;			/* Lowest page->index to unmap */
	pgoff_t last_index;			/* Highest page->index to unmap */
};

struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			     pte_t pte, bool with_public_device);
#define vm_normal_page(vma, addr, pte) _vm_normal_page(vma, addr, pte, false)

struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd);

int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
		    unsigned long size);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long start, unsigned long end);

/**
 * mm_walk - callbacks for walk_page_range
 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
 *	       this handler should only handle pud_trans_huge() puds.
 *	       the pmd_entry or pte_entry callbacks will be used for
 *	       regular PUDs.
 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
 *	       this handler is required to be able to handle
 *	       pmd_trans_huge() pmds.  They may simply choose to
 *	       split_huge_page() instead of handling it explicitly.
 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
 * @pte_hole: if set, called for each hole at all levels
 * @hugetlb_entry: if set, called for each hugetlb entry
 * @test_walk: caller specific callback function to determine whether
 *             we walk over the current vma or not. Returning 0 means
 *             "do page table walk over the current vma", returning
 *             a negative value means "abort the current page table
 *             walk right now" and returning 1 means "skip the current
 *             vma".
 * @mm:        mm_struct representing the target process of page table walk
 * @vma:       vma currently walked (NULL if walking outside vmas)
 * @private:   private data for callbacks' usage
 *
 * (see the comment on walk_page_range() for more details)
 */
struct mm_walk {
	int (*pud_entry)(pud_t *pud, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_entry)(pte_t *pte, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_hole)(unsigned long addr, unsigned long next,
			struct mm_walk *walk);
	int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
			     unsigned long addr, unsigned long next,
			     struct mm_walk *walk);
	int (*test_walk)(unsigned long addr, unsigned long next,
			 struct mm_walk *walk);
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	void *private;
};

int walk_page_range(unsigned long addr, unsigned long end,
		    struct mm_walk *walk);
int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
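
/*
 * Example (illustrative sketch, not part of the original header): counting
 * present PTEs in a range with the walker above.  'count_pte' is a
 * hypothetical callback; the caller must hold mmap_sem.
 *
 *	static int count_pte(pte_t *pte, unsigned long addr,
 *			     unsigned long next, struct mm_walk *walk)
 *	{
 *		if (pte_present(*pte))
 *			(*(unsigned long *)walk->private)++;
 *		return 0;
 *	}
 *
 *	unsigned long nr = 0;
 *	struct mm_walk walk = {
 *		.pte_entry	= count_pte,
 *		.mm		= mm,
 *		.private	= &nr,
 *	};
 *	walk_page_range(start, end, &walk);
 */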
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma);
int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
			     unsigned long *start, unsigned long *end,
			     pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write);

extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);

#ifdef CONFIG_MMU
extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags);
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long address, unsigned int fault_flags,
			    bool *unlocked);
void unmap_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t nr, bool even_cows);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
#else
static inline int handle_mm_fault(struct vm_area_struct *vma,
		unsigned long address, unsigned int flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct task_struct *tsk,
		struct mm_struct *mm, unsigned long address,
		unsigned int fault_flags, bool *unlocked)
{
	/* should never happen if there's no MMU */
	BUG();
	return -EFAULT;
}
static inline void unmap_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t nr, bool even_cows) { }
static inline void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows) { }
#endif

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
		void *buf, int len, unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, unsigned int gup_flags);
extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, unsigned int gup_flags);

long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long start, unsigned long nr_pages,
			    unsigned int gup_flags, struct page **pages,
			    struct vm_area_struct **vmas, int *locked);
long get_user_pages(unsigned long start, unsigned long nr_pages,
			    unsigned int gup_flags, struct page **pages,
			    struct vm_area_struct **vmas);
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
		    unsigned int gup_flags, struct page **pages, int *locked);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
		    struct page **pages, unsigned int gup_flags);
#ifdef CONFIG_FS_DAX
long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
			    unsigned int gup_flags, struct page **pages,
			    struct vm_area_struct **vmas);
#else
static inline long get_user_pages_longterm(unsigned long start,
		unsigned long nr_pages, unsigned int gup_flags,
		struct page **pages, struct vm_area_struct **vmas)
{
	return get_user_pages(start, nr_pages, gup_flags, pages, vmas);
}
#endif /* CONFIG_FS_DAX */

int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages);

/* Container for pinned pfns / pages */
struct frame_vector {
	unsigned int nr_allocated;	/* Number of frames we have space for */
	unsigned int nr_frames;	/* Number of frames stored in ptrs array */
	bool got_ref;		/* Did we pin pages by getting page ref? */
	bool is_pfns;		/* Does array contain pages or pfns? */
	void *ptrs[0];		/* Array of pinned pfns / pages. Use
				 * frame_vector_pages() or frame_vector_pfns()
				 * for access */
};

struct frame_vector *frame_vector_create(unsigned int nr_frames);
void frame_vector_destroy(struct frame_vector *vec);
int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
		     unsigned int gup_flags, struct frame_vector *vec);
void put_vaddr_frames(struct frame_vector *vec);
int frame_vector_to_pages(struct frame_vector *vec);
void frame_vector_to_pfns(struct frame_vector *vec);

static inline unsigned int frame_vector_count(struct frame_vector *vec)
{
	return vec->nr_frames;
}

static inline struct page **frame_vector_pages(struct frame_vector *vec)
{
	if (vec->is_pfns) {
		int err = frame_vector_to_pages(vec);

		if (err)
			return ERR_PTR(err);
	}
	return (struct page **)(vec->ptrs);
}

static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
{
	if (!vec->is_pfns)
		frame_vector_to_pfns(vec);
	return (unsigned long *)(vec->ptrs);
}
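
/*
 * Example (illustrative sketch, not part of the original header): pinning
 * the pages behind a user buffer with a frame vector and accessing them
 * as struct page pointers.  'start' and 'nr' are hypothetical.
 *
 *	struct frame_vector *vec = frame_vector_create(nr);
 *	int ret;
 *
 *	if (!vec)
 *		return -ENOMEM;
 *	ret = get_vaddr_frames(start, nr, FOLL_WRITE, vec);
 *	if (ret > 0) {
 *		struct page **pages = frame_vector_pages(vec);
 *		... use pages[0..ret-1] ...
 *		put_vaddr_frames(vec);
 *	}
 *	frame_vector_destroy(vec);
 */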

struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
			struct page **pages);
int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);

extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned int offset,
			      unsigned int length);

void __set_page_dirty(struct page *, struct address_space *, int warn);
int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_cleaned(struct page *page, struct address_space *mapping,
			  struct bdi_writeback *wb);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
void __cancel_dirty_page(struct page *page);
static inline void cancel_dirty_page(struct page *page)
{
	/* Avoid atomic ops, locking, etc. when not actually needed. */
	if (PageDirty(page))
		__cancel_dirty_page(page);
}
int clear_page_dirty_for_io(struct page *page);

int get_cmdline(struct task_struct *task, char *buffer, int buflen);

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

#ifdef CONFIG_SHMEM
/*
 * The vma_is_shmem is not inline because it is used only by slow
 * paths in userfault.
 */
bool vma_is_shmem(struct vm_area_struct *vma);
#else
static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
#endif

int vma_is_stack_for_current(struct vm_area_struct *vma);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks);
extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end, pgprot_t newprot,
			      int dirty_accountable, int prot_numa);
extern int mprotect_fixup(struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

/*
 * doesn't attempt to fault and will return short.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages);

/*
 * per-process(per-mm_struct) statistics.
 */
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = atomic_long_read(&mm->rss_stat.count[member]);

#ifdef SPLIT_RSS_COUNTING
	/*
	 * counter is updated in asynchronous manner and may go to minus.
	 * But it's never expected as a number for users.
	 */
	if (val < 0)
		val = 0;
#endif
	return (unsigned long)val;
}

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	atomic_long_add(value, &mm->rss_stat.count[member]);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_inc(&mm->rss_stat.count[member]);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_dec(&mm->rss_stat.count[member]);
}

/* Optimized variant when page is already known not to be PageAnon */
static inline int mm_counter_file(struct page *page)
{
	if (PageSwapBacked(page))
		return MM_SHMEMPAGES;
	return MM_FILEPAGES;
}

static inline int mm_counter(struct page *page)
{
	if (PageAnon(page))
		return MM_ANONPAGES;
	return mm_counter_file(page);
}

static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES) +
		get_mm_counter(mm, MM_SHMEMPAGES);
}
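
/*
 * Example (illustrative sketch, not part of the original header): turning
 * the page-granular RSS counters above into kilobytes, the way
 * /proc/<pid>/status style reporting does.
 *
 *	unsigned long rss_kb = get_mm_rss(mm) << (PAGE_SHIFT - 10);
 */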

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if ((mm)->hiwater_rss < _rss)
		(mm)->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
{
	mm->hiwater_rss = get_mm_rss(mm);
}

static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

	if (*maxrss < hiwater_rss)
		*maxrss = hiwater_rss;
}

#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct mm_struct *mm)
{
}
#endif

#ifndef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t pte)
{
	return 0;
}
#endif

int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			       spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
				    spinlock_t **ptl)
{
	pte_t *ptep;
	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
	return ptep;
}

#ifdef __PAGETABLE_P4D_FOLDED
static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
						unsigned long address)
{
	return 0;
}
#else
int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
						unsigned long address)
{
	return 0;
}
static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
static inline void mm_dec_nr_puds(struct mm_struct *mm) {}

#else
int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);

static inline void mm_inc_nr_puds(struct mm_struct *mm)
{
	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_puds(struct mm_struct *mm)
{
	atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
#endif

#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}

static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}

#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);

static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
	atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
	atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
#endif

#ifdef CONFIG_MMU
static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->pgtables_bytes, 0);
}

static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
	return atomic_long_read(&mm->pgtables_bytes);
}

static inline void mm_inc_nr_ptes(struct mm_struct *mm)
{
	atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}

static inline void mm_dec_nr_ptes(struct mm_struct *mm)
{
	atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
}
#else

static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
{
	return 0;
}

static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
#endif

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);

/*
 * The following ifdef needed to get the 4level-fixup.h header to work.
 * Remove it when 4level-fixup.h has been removed.
 */
#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)

#ifndef __ARCH_HAS_5LEVEL_HACK
static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
		unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
		NULL : p4d_offset(pgd, address);
}

static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
		unsigned long address)
{
	return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
		NULL : pud_offset(p4d, address);
}
#endif /* !__ARCH_HAS_5LEVEL_HACK */

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
		NULL: pmd_offset(pud, address);
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
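
/*
 * Example (illustrative sketch, not part of the original header): walking
 * and allocating the page-table path down to the PMD level for a given
 * address; every step yields NULL if allocation fails.
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	p4d_t *p4d = p4d_alloc(mm, pgd, addr);
 *	pud_t *pud = p4d ? pud_alloc(mm, p4d, addr) : NULL;
 *	pmd_t *pmd = pud ? pmd_alloc(mm, pud, addr) : NULL;
 *
 *	if (!pmd)
 *		return -ENOMEM;
 */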

#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
extern bool ptlock_alloc(struct page *page);
extern void ptlock_free(struct page *page);

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return page->ptl;
}
#else /* ALLOC_SPLIT_PTLOCKS */
static inline void ptlock_cache_init(void)
{
}

static inline bool ptlock_alloc(struct page *page)
{
	return true;
}

static inline void ptlock_free(struct page *page)
{
}

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return &page->ptl;
}
#endif /* ALLOC_SPLIT_PTLOCKS */

static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_page(*pmd));
}

static inline bool ptlock_init(struct page *page)
{
	/*
	 * prep_new_page() initializes page->private (and therefore page->ptl)
	 * with 0. Make sure nobody took it in use in between.
	 *
	 * It can happen if an arch tries to use slab for page table
	 * allocation: slab code uses page->slab_cache, which shares storage
	 * with page->ptl.
	 */
	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
	if (!ptlock_alloc(page))
		return false;
	spin_lock_init(ptlock_ptr(page));
	return true;
}

/* Reset page->mapping so free_pages_check won't complain. */
static inline void pte_lock_deinit(struct page *page)
{
	page->mapping = NULL;
	ptlock_free(page);
}

#else	/* !USE_SPLIT_PTE_PTLOCKS */
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct page *page) { return true; }
static inline void pte_lock_deinit(struct page *page) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */

static inline void pgtable_init(void)
{
	ptlock_cache_init();
	pgtable_cache_init();
}

static inline bool pgtable_page_ctor(struct page *page)
{
	if (!ptlock_init(page))
		return false;
	inc_zone_page_state(page, NR_PAGETABLE);
	return true;
}

static inline void pgtable_page_dtor(struct page *page)
{
	pte_lock_deinit(page);
	dec_zone_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)
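
/*
 * Example (illustrative sketch, not part of the original header): mapping
 * and locking a single PTE, then releasing it.  The map/lock and
 * unmap/unlock must always be paired as shown.
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *
 *	if (pte_present(*pte))
 *		... examine or modify the entry under the lock ...
 *	pte_unmap_unlock(pte, ptl);
 */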

#define pte_alloc(mm, pmd, address)			\
	(unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address))

#define pte_alloc_map(mm, pmd, address)			\
	(pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
	(pte_alloc(mm, pmd, address) ?			\
		 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)			\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
		NULL: pte_offset_kernel(pmd, address))

#if USE_SPLIT_PMD_PTLOCKS

static struct page *pmd_to_page(pmd_t *pmd)
{
	unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	return virt_to_page((void *)((unsigned long) pmd & mask));
}

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_to_page(pmd));
}

static inline bool pgtable_pmd_page_ctor(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	page->pmd_huge_pte = NULL;
#endif
	return ptlock_init(page);
}

static inline void pgtable_pmd_page_dtor(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
#endif
	ptlock_free(page);
}

#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)

#else

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}

static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
static inline void pgtable_pmd_page_dtor(struct page *page) {}

#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)

#endif

static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl = pmd_lockptr(mm, pmd);
	spin_lock(ptl);
	return ptl;
}

/*
 * No scalability reason to split PUD locks yet, but follow the same pattern
 * as the PMD locks to make it easier if we decide to.  The VM should not be
 * considered ready to switch to split PUD locks yet; there may be places
 * which need to be converted from page_table_lock.
 */
static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
{
	return &mm->page_table_lock;
}

static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
{
	spinlock_t *ptl = pud_lockptr(mm, pud);

	spin_lock(ptl);
	return ptl;
}

extern void __init pagecache_init(void);
extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, unsigned long * zones_size,
		unsigned long zone_start_pfn, unsigned long *zholes_size);
extern void free_initmem(void);

/*
 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
 * into the buddy system. The freed pages will be poisoned with pattern
 * "poison" if it's within range [0, UCHAR_MAX].
 * Return pages freed into the buddy system.
 */
extern unsigned long free_reserved_area(void *start, void *end,
					int poison, char *s);

#ifdef	CONFIG_HIGHMEM
/*
 * Free a highmem page into the buddy system, adjusting totalhigh_pages
 * and totalram_pages.
 */
extern void free_highmem_page(struct page *page);
#endif

extern void adjust_managed_page_count(struct page *page, long count);
extern void mem_init_print_info(const char *str);

extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);

/* Free the reserved page into the buddy system, so it gets managed. */
static inline void __free_reserved_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}

static inline void free_reserved_page(struct page *page)
{
	__free_reserved_page(page);
	adjust_managed_page_count(page, 1);
}

static inline void mark_page_reserved(struct page *page)
{
	SetPageReserved(page);
	adjust_managed_page_count(page, -1);
}

/*
 * Default method to free all the __init memory into the buddy system.
 * The freed pages will be poisoned with pattern "poison" if it's within
 * range [0, UCHAR_MAX].
 * Return pages freed into the buddy system.
 */
static inline unsigned long free_initmem_default(int poison)
{
	extern char __init_begin[], __init_end[];

	return free_reserved_area(&__init_begin, &__init_end,
				  poison, "unused kernel");
}

static inline unsigned long get_num_physpages(void)
{
	int nid;
	unsigned long phys_pages = 0;

	for_each_online_node(nid)
		phys_pages += node_present_pages(nid);

	return phys_pages;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
 * zones, allocate the backing mem_map and account for memory holes in a more
 * architecture independent manner.
 *
 * An architecture is expected to register the ranges of page frames backed
 * by physical memory with memblock_add[_node]() before calling
 * free_area_init_nodes(), passing in the PFN each zone ends at.
 *
 * If the architecture guarantees that there are no holes in a zone this
 * information may simply be zeroed out.
 */
extern void free_area_init_nodes(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
						unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
						unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
						unsigned long max_low_pfn);
extern void sparse_memory_present_with_active_regions(int nid);

#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
    !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
static inline int __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state)
{
	return 0;
}
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
/* there is a per-arch backend function. */
extern int __meminit __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state);
#endif

#ifdef CONFIG_HAVE_MEMBLOCK
void zero_resv_unavail(void);
#else
static inline void zero_resv_unavail(void) {}
#endif

extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
		enum memmap_context, struct vmem_altmap *);
extern void setup_per_zone_wmarks(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
extern void show_mem(unsigned int flags, nodemask_t *nodemask);
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
#ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
extern unsigned long arch_reserved_kernel_pages(void);
#endif

extern __printf(3, 4)
void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);

extern void setup_per_cpu_pageset(void);

extern void zone_pcp_update(struct zone *zone);
extern void zone_pcp_reset(struct zone *zone);

/* page_alloc.c */
extern int min_free_kbytes;
extern int watermark_scale_factor;

/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);

/* interval_tree.c */
void vma_interval_tree_insert(struct vm_area_struct *node,
			      struct rb_root_cached *root);
void vma_interval_tree_insert_after(struct vm_area_struct *node,
				    struct vm_area_struct *prev,
				    struct rb_root_cached *root);
void vma_interval_tree_remove(struct vm_area_struct *node,
			      struct rb_root_cached *root);
struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
				unsigned long start, unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
				unsigned long start, unsigned long last);

#define vma_interval_tree_foreach(vma, root, start, last)		\
	for (vma = vma_interval_tree_iter_first(root, start, last);	\
	     vma; vma = vma_interval_tree_iter_next(vma, start, last))

void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
				   struct rb_root_cached *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
				   struct rb_root_cached *root);
struct anon_vma_chain *
anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
				  unsigned long start, unsigned long last);
struct anon_vma_chain *anon_vma_interval_tree_iter_next(
	struct anon_vma_chain *node, unsigned long start, unsigned long last);
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
#endif

#define anon_vma_interval_tree_foreach(avc, root, start, last) \
	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))

/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
	struct vm_area_struct *expand);
static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
{
	return __vma_adjust(vma, start, end, pgoff, insert, NULL);
}
extern struct vm_area_struct *vma_merge(struct mm_struct *,
	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
	struct mempolicy *, struct vm_userfaultfd_ctx);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
	unsigned long addr, int new_below);
extern int split_vma(struct mm_struct *, struct vm_area_struct *,
	unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
	struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);

static inline int check_data_rlimit(unsigned long rlim,
				    unsigned long new,
				    unsigned long start,
				    unsigned long end_data,
				    unsigned long start_data)
{
	if (rlim < RLIM_INFINITY) {
		if (((new - start) + (end_data - start_data)) > rlim)
			return -ENOSPC;
	}

	return 0;
}

extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);

extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);
extern struct file *get_task_exe_file(struct task_struct *task);

extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);

extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
				   const struct vm_special_mapping *sm);
extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags,
				   const struct vm_special_mapping *spec);

extern int install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags, struct page **pages);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long mmap_region(struct file *file, unsigned long addr,
	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
	struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
	struct list_head *uf);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
		     struct list_head *uf);

static inline unsigned long
do_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	unsigned long pgoff, unsigned long *populate,
	struct list_head *uf)
{
	return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf);
}

#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
			 int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
{
	/* Ignore errors */
	(void) __mm_populate(addr, len, 1);
}
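
/*
 * Illustrative sketch (not part of this header): a vm_mmap()-style caller
 * faulting in a freshly created mapping, the way MAP_POPULATE is handled.
 * "populate" is filled in by do_mmap() via its *populate argument; the
 * rest of the surrounding context here is an assumption.
 *
 *	unsigned long addr, populate;
 *
 *	addr = do_mmap_pgoff(file, 0, len, prot, flags, pgoff,
 *			     &populate, &uf);
 *	if (!IS_ERR_VALUE(addr) && populate)
 *		mm_populate(addr, populate);
 */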
#else
static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif

/* These take the mm semaphore themselves */
extern int __must_check vm_brk(unsigned long, unsigned long);
extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
					  unsigned long, unsigned long,
					  unsigned long, unsigned long);

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
};

extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);

/*
 * Search for an unmapped address range.
 *
 * We are looking for a range that:
 * - does not intersect with any VMA;
 * - is contained within the [low_limit, high_limit) interval;
 * - is at least the desired size.
 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
 */
static inline unsigned long
vm_unmapped_area(struct vm_unmapped_area_info *info)
{
	if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
		return unmapped_area_topdown(info);
	else
		return unmapped_area(info);
}
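
/*
 * Illustrative sketch (not part of this header): how an architecture's
 * arch_get_unmapped_area() typically fills in vm_unmapped_area_info for a
 * bottom-up search.  TASK_UNMAPPED_BASE and TASK_SIZE are arch-provided;
 * treating alignment as "none" (mask/offset zero) is an assumption.
 *
 *	struct vm_unmapped_area_info info;
 *
 *	info.flags = 0;			(bottom-up search)
 *	info.length = len;
 *	info.low_limit = TASK_UNMAPPED_BASE;
 *	info.high_limit = TASK_SIZE;
 *	info.align_mask = 0;
 *	info.align_offset = 0;
 *	return vm_unmapped_area(&info);
 */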

/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
				       loff_t lstart, loff_t lend);
extern void truncate_inode_pages_final(struct address_space *);

/* generic vm_area_ops exported for stackable file systems */
extern int filemap_fault(struct vm_fault *vmf);
extern void filemap_map_pages(struct vm_fault *vmf,
		pgoff_t start_pgoff, pgoff_t end_pgoff);
extern int filemap_page_mkwrite(struct vm_fault *vmf);

/* mm/page-writeback.c */
int __must_check write_one_page(struct page *page);
void task_dirty_inc(struct task_struct *tsk);

/* readahead.c */
#define VM_MAX_READAHEAD	128	/* kbytes */
#define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */

int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read);

void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra,
			       struct file *filp,
			       pgoff_t offset,
			       unsigned long size);

void page_cache_async_readahead(struct address_space *mapping,
				struct file_ra_state *ra,
				struct file *filp,
				struct page *pg,
				pgoff_t offset,
				unsigned long size);

extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);

/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
extern int expand_downwards(struct vm_area_struct *vma,
		unsigned long address);
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
  #define expand_upwards(vma, address) (0)
#endif

/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
					     struct vm_area_struct **pprev);

/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none.  Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma = find_vma(mm, start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}
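
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * checking whether any part of a user address range is currently mapped.
 * The caller must hold mm's mmap semaphore; "mm", "start" and "end" come
 * from the assumed surrounding context.
 *
 *	static bool range_is_mapped(struct mm_struct *mm,
 *				    unsigned long start, unsigned long end)
 *	{
 *		return find_vma_intersection(mm, start, end) != NULL;
 *	}
 */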

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)	/* underflow: clamp to 0 */
			vm_start = 0;
	}
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)	/* overflow: clamp to top */
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

/* Look up the first VMA which exactly match the interval vm_start ... vm_end */
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
				unsigned long vm_start, unsigned long vm_end)
{
	struct vm_area_struct *vma = find_vma(mm, vm_start);

	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
		vma = NULL;

	return vma;
}

#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
#else
static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(0);
}
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
#endif

#ifdef CONFIG_NUMA_BALANCING
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
#endif

struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn, pgprot_t pgprot);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			pfn_t pfn);
int vm_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr,
			pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
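
/*
 * Illustrative sketch (not part of this header): a character driver's
 * ->mmap() handler exposing a physical region with remap_pfn_range().
 * "dev_phys_base" and the (omitted) bounds checking are assumptions.
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       dev_phys_base >> PAGE_SHIFT,
 *				       size, vma->vm_page_prot);
 *	}
 */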

/*
 * Wrappers that convert the errno returned by the vm_insert_* helpers
 * into a vm_fault_t suitable for returning from a ->fault() handler.
 */
static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
			unsigned long addr, struct page *page)
{
	int err = vm_insert_page(vma, addr, page);

	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static inline vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma,
				unsigned long addr, pfn_t pfn)
{
	int err = vm_insert_mixed(vma, addr, pfn);

	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}
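
/*
 * Illustrative sketch (not part of this header): a ->fault() handler that
 * maps one pre-allocated page with vmf_insert_page().  The "my_page()"
 * lookup is a hypothetical placeholder.
 *
 *	static vm_fault_t mydev_fault(struct vm_fault *vmf)
 *	{
 *		struct page *page = my_page(vmf->vma, vmf->pgoff);
 *
 *		if (!page)
 *			return VM_FAULT_SIGBUS;
 *		return vmf_insert_page(vmf->vma, vmf->address, page);
 *	}
 */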

static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma,
			unsigned long addr, unsigned long pfn)
{
	int err = vm_insert_pfn(vma, addr, pfn);

	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static inline vm_fault_t vmf_error(int err)
{
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int foll_flags,
			      unsigned int *page_mask);

static inline struct page *follow_page(struct vm_area_struct *vma,
		unsigned long address, unsigned int foll_flags)
{
	unsigned int unused_page_mask;
	return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
}
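
/*
 * Illustrative sketch (not part of this header): looking up the page
 * backing a user address and taking a reference on it with FOLL_GET.
 * The caller must hold the mm semaphore; "vma" and "address" come from
 * the assumed surrounding context.
 *
 *	struct page *page;
 *
 *	page = follow_page(vma, address, FOLL_GET | FOLL_DUMP);
 *	if (!IS_ERR_OR_NULL(page)) {
 *		... use page ...
 *		put_page(page);
 *	}
 */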

#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_TOUCH	0x02	/* mark page accessed */
#define FOLL_GET	0x04	/* do get_page on page */
#define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
#define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
#define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
				 * and return without waiting upon it */
#define FOLL_POPULATE	0x40	/* fault in page */
#define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
#define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
#define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
#define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
#define FOLL_TRIED	0x800	/* a retry, previous pass started an IO */
#define FOLL_MLOCK	0x1000	/* lock present pages */
#define FOLL_REMOTE	0x2000	/* we are working on non-current tsk/mm */
#define FOLL_COW	0x4000	/* internal GUP flag */
#define FOLL_ANON	0x8000	/* don't do file mappings */

static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
{
	if (vm_fault & VM_FAULT_OOM)
		return -ENOMEM;
	if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
		return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
	if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
		return -EFAULT;
	return 0;
}
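
/*
 * Illustrative sketch (not part of this header): translating the result of
 * a handle_mm_fault()-style call into an errno for a get_user_pages-like
 * caller.  "ret" and "flags" come from the assumed surrounding context.
 *
 *	if (ret & VM_FAULT_ERROR) {
 *		int err = vm_fault_to_errno(ret, flags);
 *
 *		if (err)
 *			return err;
 *	}
 */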

typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
			void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
			       unsigned long size, pte_fn_t fn, void *data);
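
/*
 * Illustrative sketch (not part of this header): a pte_fn_t callback used
 * with apply_to_page_range() to count present PTEs over a range.  Real
 * users supply their own per-PTE logic; the names here are assumptions.
 *
 *	static int count_present(pte_t *pte, pgtable_t token,
 *				 unsigned long addr, void *data)
 *	{
 *		if (pte_present(*pte))
 *			(*(unsigned long *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned long count = 0;
 *	apply_to_page_range(mm, start, size, count_present, &count);
 */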

#ifdef CONFIG_PAGE_POISONING
extern bool page_poisoning_enabled(void);
extern void kernel_poison_pages(struct page *page, int numpages, int enable);
extern bool page_is_poisoned(struct page *page);
#else
static inline bool page_poisoning_enabled(void) { return false; }
static inline void kernel_poison_pages(struct page *page, int numpages,
					int enable) { }
static inline bool page_is_poisoned(struct page *page) { return false; }
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
extern bool _debug_pagealloc_enabled;
extern void __kernel_map_pages(struct page *page, int numpages, int enable);

static inline bool debug_pagealloc_enabled(void)
{
	return _debug_pagealloc_enabled;
}

static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!debug_pagealloc_enabled())
		return;

	__kernel_map_pages(page, numpages, enable);
}
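
/*
 * Illustrative sketch (not part of this header): how a page-free path
 * might use these hooks so that stray accesses to freed pages fault
 * immediately under CONFIG_DEBUG_PAGEALLOC.  The surrounding free-path
 * context is assumed.
 *
 *	kernel_poison_pages(page, 1 << order, 0);	(poison on free)
 *	kernel_map_pages(page, 1 << order, 0);		(unmap on free)
 */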
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif	/* CONFIG_HIBERNATION */
#else	/* CONFIG_DEBUG_PAGEALLOC */
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
#endif	/* CONFIG_HIBERNATION */
static inline bool debug_pagealloc_enabled(void)
{
	return false;
}
#endif	/* CONFIG_DEBUG_PAGEALLOC */

#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
extern int in_gate_area_no_mm(unsigned long addr);
extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
#else
static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}
static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}
#endif	/* __HAVE_ARCH_GATE_AREA */

extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);

#ifdef CONFIG_SYSCTL
extern int sysctl_drop_caches;
int drop_caches_sysctl_handler(struct ctl_table *, int,
			       void __user *, size_t *, loff_t *);
#endif

void drop_slab(void);
void drop_slab_node(int nid);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char * arch_vma_name(struct vm_area_struct *vma);
void print_vma_addr(char *prefix, unsigned long rip);

void sparse_mem_maps_populate_node(struct page **map_map,
				   unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count,
				   int nodeid);

struct vmem_altmap;
struct page *sparse_mem_map_populate(unsigned long pnum, int nid,
		struct vmem_altmap *altmap);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
void *vmemmap_alloc_block_buf(unsigned long size, int node);
void *altmap_alloc_block_buf(unsigned long size, struct vmem_altmap *altmap);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
			       int node);
int vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap);
#endif
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
				  unsigned long nr_pages);

enum mf_flags {
	MF_COUNT_INCREASED = 1 << 0,
	MF_ACTION_REQUIRED = 1 << 1,
	MF_MUST_KILL = 1 << 2,
	MF_SOFT_OFFLINE = 1 << 3,
};
extern int memory_failure(unsigned long pfn, int flags);
extern void memory_failure_queue(unsigned long pfn, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int get_hwpoison_page(struct page *page);
#define put_hwpoison_page(page)	put_page(page)
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(struct page *page, int flags);

/*
 * Error handlers for various types of pages.
 */
enum mf_result {
	MF_IGNORED,	/* Error: cannot be handled */
	MF_FAILED,	/* Error: handling failed */
	MF_DELAYED,	/* Will be handled later */
	MF_RECOVERED,	/* Successfully recovered */
};

enum mf_action_page_type {
	MF_MSG_KERNEL,
	MF_MSG_KERNEL_HIGH_ORDER,
	MF_MSG_SLAB,
	MF_MSG_DIFFERENT_COMPOUND,
	MF_MSG_POISONED_HUGE,
	MF_MSG_HUGE,
	MF_MSG_FREE_HUGE,
	MF_MSG_NON_PMD_HUGE,
	MF_MSG_UNMAP_FAILED,
	MF_MSG_DIRTY_SWAPCACHE,
	MF_MSG_CLEAN_SWAPCACHE,
	MF_MSG_DIRTY_MLOCKED_LRU,
	MF_MSG_CLEAN_MLOCKED_LRU,
	MF_MSG_DIRTY_UNEVICTABLE_LRU,
	MF_MSG_CLEAN_UNEVICTABLE_LRU,
	MF_MSG_DIRTY_LRU,
	MF_MSG_CLEAN_LRU,
	MF_MSG_TRUNCATED_LRU,
	MF_MSG_BUDDY,
	MF_MSG_BUDDY_2ND,
	MF_MSG_UNKNOWN,
};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
			    unsigned long addr_hint,
			    unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src,
				unsigned long addr, struct vm_area_struct *vma,
				unsigned int pages_per_huge_page);
extern long copy_huge_page_from_user(struct page *dst_page,
				const void __user *usr_src,
				unsigned int pages_per_huge_page,
				bool allow_pagefault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

extern struct page_ext_operations debug_guardpage_ops;

#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
extern bool _debug_guardpage_enabled;

static inline unsigned int debug_guardpage_minorder(void)
{
	return _debug_guardpage_minorder;
}

static inline bool debug_guardpage_enabled(void)
{
	return _debug_guardpage_enabled;
}

static inline bool page_is_guard(struct page *page)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return false;

	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return false;

	return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool debug_guardpage_enabled(void) { return false; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif /* CONFIG_DEBUG_PAGEALLOC */

#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else
static inline void setup_nr_node_ids(void) {}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */