#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/err.h>
#include <linux/page_ref.h>
#include <linux/page_ext.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;

#ifndef CONFIG_DISCONTIGMEM	/* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
#endif

extern unsigned long num_physpages;
extern unsigned long totalram_pages;
extern unsigned long totalcma_pages;
extern void * high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
extern const int mmap_rnd_bits_min;
extern const int mmap_rnd_bits_max;
extern int mmap_rnd_bits __read_mostly;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
extern const int mmap_rnd_compat_bits_min;
extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __read_mostly;
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#ifndef __pa_symbol
#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;

extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
				    size_t *, loff_t *);
extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
				     size_t *, loff_t *);

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)addr, PAGE_SIZE)

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */
extern struct kmem_cache *vm_area_cachep;

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 */
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
#define VM_UFFD_WP	0x00001000	/* wrprotect pages tracking */

#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_FOP_EXTEND	0x00080000

#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_ARCH_2	0x02000000
#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
#else
# define VM_SOFTDIRTY	0
#endif

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
#define VM_HIGH_ARCH_BIT_0	32	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_1	33	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_2	34	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3	35	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */

#define VM_PFN_MKWRITE	0x00000001
#define VM_HUGE_FAULT	0x00000002
#define VM_SPLIT	0x00000004

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#if defined(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS)
# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0	VM_HIGH_ARCH_0	/* A protection key is a 4-bit value */
# define VM_PKEY_BIT1	VM_HIGH_ARCH_1
# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
#endif
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_METAG)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif

#if defined(CONFIG_X86)
/* MPX specific bounds table or bounds directory */
# define VM_MPX		VM_ARCH_2
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS	(VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#else
#define VM_STACK_FLAGS	(VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif

#define VM_READHINTMASK			(VM_SEQ_READ | VM_RAND_READ)
#define VM_ClearReadHint(v)		(v)->vm_flags &= ~VM_READHINTMASK
#define VM_NormalReadHint(v)		(!((v)->vm_flags & VM_READHINTMASK))
#define VM_SequentialReadHint(v)	((v)->vm_flags & VM_SEQ_READ)
#define VM_RandomReadHint(v)		((v)->vm_flags & VM_RAND_READ)

/*
 * Special vmas that are non-mergable, non-mlock()able.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP)

/* This mask defines which mm->def_flags a process can inherit its parent */
#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection map..
 */
extern pgprot_t protection_map[16];

#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
#define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */
#define FAULT_FLAG_MKWRITE	0x04	/* Fault was mkwrite of existing pte */
#define FAULT_FLAG_ALLOW_RETRY	0x08	/* Retry fault if blocking */
#define FAULT_FLAG_RETRY_NOWAIT	0x10	/* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE	0x20	/* The fault task is in SIGKILL killable region */
#define FAULT_FLAG_TRIED	0x40	/* second try */
#define FAULT_FLAG_USER		0x80	/* The fault originated in userspace */
#define FAULT_FLAG_REMOTE	0x100	/* faulting for non current tsk/mm */
#define FAULT_FLAG_INSTRUCTION	0x200	/* The fault was during an instruction fetch */

#define FAULT_FLAG_TRACE \
	{ FAULT_FLAG_WRITE,		"WRITE" }, \
	{ FAULT_FLAG_MKWRITE,		"MKWRITE" }, \
	{ FAULT_FLAG_ALLOW_RETRY,	"ALLOW_RETRY" }, \
	{ FAULT_FLAG_RETRY_NOWAIT,	"RETRY_NOWAIT" }, \
	{ FAULT_FLAG_KILLABLE,		"KILLABLE" }, \
	{ FAULT_FLAG_TRIED,		"TRIED" }, \
	{ FAULT_FLAG_USER,		"USER" }, \
	{ FAULT_FLAG_REMOTE,		"REMOTE" }, \
	{ FAULT_FLAG_INSTRUCTION,	"INSTRUCTION" }

/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a
 * bitmask of VM_FAULT_xxx flags that give details about how the fault
 * was handled.
 *
 * pgoff should be used in favour of virtual_address, if possible.
 */
struct vm_fault {
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	void __user *virtual_address;	/* Faulting virtual address */

	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
	RH_KABI_EXTEND(struct page *cow_page)	/* Handler may choose to COW */
	RH_KABI_EXTEND(pte_t orig_pte)		/* Value of PTE at the time of fault */
	RH_KABI_EXTEND(pmd_t *pmd)		/* Pointer to pmd entry matching
						 * the 'address' */
	RH_KABI_EXTEND(struct vm_area_struct *vma)	/* Target VMA */
	RH_KABI_EXTEND(gfp_t gfp_mask)		/* gfp mask to be used for allocations */
	RH_KABI_EXTEND(pte_t *pte)		/* Pointer to pte entry matching
						 * the 'address'. NULL if the page
						 * table hasn't been allocated. */
	RH_KABI_EXTEND(pud_t *pud)		/* Pointer to pud entry matching
						 * the 'address' */
};

enum page_entry_size {
	PE_SIZE_PTE = 0,
	PE_SIZE_PMD,
	PE_SIZE_PUD,
};

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs that can switch between memory and hardware
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);
#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  get_policy() must NOT add a ref
	 * if the policy at (vma,addr) is not marked as MPOL_SHARED; vma
	 * policies are protected by the mmap_sem.  If no [shared/vma]
	 * mempolicy exists at the addr, get_policy() must return NULL--i.e.,
	 * do not "fall back" to the task or system default policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
	int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
		const nodemask_t *to, unsigned long flags);
#endif
	/* called by sys_remap_file_pages() to populate non-linear mapping */
	int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
			   unsigned long size, pgoff_t pgoff);

	RH_KABI_EXTEND(int (*pfn_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf))
	RH_KABI_EXTEND(int (*huge_fault)(struct vm_fault *vmf,
			enum page_entry_size pe_size))
	RH_KABI_EXTEND(int (*split)(struct vm_area_struct *area,
			unsigned long addr))
};

struct mmu_gather;
struct inode;

#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

/* It's valid only if the page is on the free path or a free_list */
static inline void set_freepage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}

static inline int get_freepage_migratetype(struct page *page)
{
	return page->index;
}

#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline int pmd_devmap(pmd_t pmd)
{
	return 0;
}
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
#endif

/*
 * FIXME: take this include out, include page-flags.h in
 * files which need it (119 of them)
 */
#include <linux/page-flags.h>
#include <linux/huge_mm.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
	return page_ref_dec_and_test(page);
}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 */
static inline int get_page_unless_zero(struct page *page)
{
	return page_ref_add_unless(page, 1, 0);
}

extern int page_is_ram(unsigned long pfn);

enum {
	REGION_INTERSECTS,
	REGION_DISJOINT,
	REGION_MIXED,
};

int region_intersects(resource_size_t offset, size_t size, const char *type,
		unsigned long flags);
int region_intersects_ram(resource_size_t offset, size_t size);
int region_intersects_pmem(resource_size_t offset, size_t size);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
static inline int is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return 0;
#endif
}
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
static inline void *kvmalloc(size_t size, gfp_t flags)
{
	return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
	return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
static inline void *kvzalloc(size_t size, gfp_t flags)
{
	return kvmalloc(size, flags | __GFP_ZERO);
}

static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;

	return kvmalloc(n * size, flags);
}

extern void kvfree(const void *addr);
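
/*
 * Usage sketch (illustrative, not part of this header): kvmalloc()
 * attempts a kmalloc() first and may fall back to vmalloc(), so the
 * result must be released with kvfree(), never kfree().  "struct foo"
 * and "nr" below are placeholders of the example:
 *
 *	struct foo *tbl = kvmalloc_array(nr, sizeof(*tbl), GFP_KERNEL);
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	kvfree(tbl);
 */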

static inline void compound_lock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON_PAGE(PageSlab(page), page);
	bit_spin_lock(PG_compound_lock, &page->flags);
#endif
}

static inline void compound_unlock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON_PAGE(PageSlab(page), page);
	bit_spin_unlock(PG_compound_lock, &page->flags);
#endif
}

static inline unsigned long compound_lock_irqsave(struct page *page)
{
	unsigned long uninitialized_var(flags);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	local_irq_save(flags);
	compound_lock(page);
#endif
	return flags;
}

static inline void compound_unlock_irqrestore(struct page *page,
					      unsigned long flags)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	compound_unlock(page);
	local_irq_restore(flags);
#endif
}

/*
 * The atomic page->_mapcount, starts from -1: so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */
static inline void page_mapcount_reset(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

static inline int page_mapcount(struct page *page)
{
	return atomic_read(&(page)->_mapcount) + 1;
}

#ifdef CONFIG_HUGETLB_PAGE
extern int PageHeadHuge(struct page *page_head);
#else /* CONFIG_HUGETLB_PAGE */
static inline int PageHeadHuge(struct page *page_head)
{
	return 0;
}
#endif /* CONFIG_HUGETLB_PAGE */

static inline bool __compound_tail_refcounted(struct page *page)
{
	return !PageSlab(page) && !PageHeadHuge(page);
}

/*
 * This takes a head page as parameter and tells if the
 * tail page reference counting can be skipped.
 *
 * For this to be safe, PageSlab and PageHeadHuge must remain true on
 * any given page where they return true here, until all tail pins
 * have been released.
 */
static inline bool compound_tail_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	return __compound_tail_refcounted(page);
}

static inline void get_huge_page_tail(struct page *page)
{
	/*
	 * __split_huge_page_refcount() cannot run from under us.
	 */
	VM_BUG_ON_PAGE(!PageTail(page), page);
	VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
	VM_BUG_ON_PAGE(page_ref_count(page) != 0, page);
	if (compound_tail_refcounted(page->first_page))
		atomic_inc(&page->_mapcount);
}

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);
	return compound_head(page);
}

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 *
 * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
 * -2 so that an underflow of the page_mapcount() won't be mistaken
 * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE.
 */
#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)

static inline int PageBuddy(struct page *page)
{
	return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
}

static inline void __SetPageBuddy(struct page *page)
{
	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
}

static inline void __ClearPageBuddy(struct page *page)
{
	VM_BUG_ON_PAGE(!PageBuddy(page), page);
	atomic_set(&page->_mapcount, -1);
}

#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)

static inline int PageBalloon(struct page *page)
{
	return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
}

static inline void __SetPageBalloon(struct page *page)
{
	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
	atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
}

static inline void __ClearPageBalloon(struct page *page)
{
	VM_BUG_ON_PAGE(!PageBalloon(page), page);
	atomic_set(&page->_mapcount, -1);
}

void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);
int split_free_page(struct page *page);

/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a PG_compound page.
 */
typedef void compound_page_dtor(struct page *);

static inline void set_compound_page_dtor(struct page *page,
						compound_page_dtor *dtor)
{
	page[1].lru.next = (void *)dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	return (compound_page_dtor *)page[1].lru.next;
}

static inline int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return (unsigned long)page[1].lru.prev;
}

static inline void set_compound_order(struct page *page, unsigned long order)
{
	page[1].lru.prev = (void *)order;
}
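
/*
 * Illustrative sketch: the order and destructor of a compound page are
 * stashed in the first tail page (page[1]), so tearing down a compound
 * head roughly follows this pattern (cf. mm/swap.c):
 *
 *	compound_page_dtor *dtor = get_compound_page_dtor(page);
 *	(*dtor)(page);
 */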

#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case, do always want
 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}
int finish_fault(struct vm_fault *vmf);
int finish_mkwrite_fault(struct vm_fault *vmf);
#endif

/*
 * Multiple processes may "see" the same page.  E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count:
 *   page_count() == 0 means the page is free; page->lru is then used for
 *   freelist management in the buddy allocator;
 *   page_count() > 0  means the page has been allocated.
 */

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */

/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)
#define ZONE_DEVICE_PGOFF	(LAST_CPUPID_PGOFF - ZONE_DEVICE_WIDTH)

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
#define ZONE_DEVICE_PGSHIFT	(ZONE_DEVICE_PGOFF * (ZONE_DEVICE_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_WIDTH) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

#define ZONE_DEVICE_FLAG	(1UL << ZONE_DEVICE_PGSHIFT)

static inline enum zone_type page_zonenum(const struct page *page)
{
#ifdef CONFIG_ZONE_DEVICE
	if (page->flags & ZONE_DEVICE_FLAG)
		return ZONE_DEVICE;
#endif
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

#ifdef CONFIG_ZONE_DEVICE
void get_zone_device_page(struct page *page);
void put_zone_device_page(struct page *page);
static inline bool is_zone_device_page(const struct page *page)
{
	return page_zonenum(page) == ZONE_DEVICE;
}
#else
static inline void get_zone_device_page(struct page *page)
{
}
static inline void put_zone_device_page(struct page *page)
{
}
static inline bool is_zone_device_page(const struct page *page)
{
	return false;
}
#endif

extern bool __get_page_tail(struct page *page);

static inline void get_page(struct page *page)
{
	if (unlikely(PageTail(page)))
		if (likely(__get_page_tail(page)))
			return;
	/*
	 * Getting a normal page or the head of a compound page
	 * requires to already have an elevated page->_count.
	 */
	VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);

	page_ref_inc(page);

	if (unlikely(is_zone_device_page(page)))
		get_zone_device_page(page);
}

void put_page(struct page *page);

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif

/*
 * The identification function is mainly used by the buddy allocator for
 * determining if two pages could be buddies.  We are not really identifying
 * the zone since we could be using the section number id if we do not have
 * node id available in page flags.
 * We only guarantee that it will return the same value for two combinable
 * pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
	return zone->node;
#else
	return 0;
#endif
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

static inline int cpupid_to_pid(int cpupid)
{
	return cpupid & LAST__PID_MASK;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}

static inline int cpupid_to_nid(int cpupid)
{
	return cpu_to_node(cpupid_to_cpu(cpupid));
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}

static inline bool cpupid_cpu_unset(int cpupid)
{
	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
}

static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
{
	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
}

#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return xchg(&page->_last_cpupid, cpupid);
}

static inline int page_cpupid_last(struct page *page)
{
	return page->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
	page->_last_cpupid = -1;
}
#else
static inline int page_cpupid_last(struct page *page)
{
	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}

extern int page_cpupid_xchg_last(struct page *page, int cpupid);

static inline void page_cpupid_reset_last(struct page *page)
{
	int cpupid = (1 << LAST_CPUPID_SHIFT) - 1;

	page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
	page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
#else /* !CONFIG_NUMA_BALANCING */
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return page_to_nid(page);
}

static inline int page_cpupid_last(struct page *page)
{
	return page_to_nid(page);
}

static inline int cpupid_to_nid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_pid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return -1;
}

static inline int cpu_pid_to_cpupid(int nid, int pid)
{
	return -1;
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return 1;
}

static inline void page_cpupid_reset_last(struct page *page)
{
}

static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline struct zone *page_zone(const struct page *page)
{
#ifdef CONFIG_ZONE_DEVICE
	if (page->flags & ZONE_DEVICE_FLAG)
		return NODE_DATA(page_to_nid(page))->zone_device;
#endif
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
#ifdef CONFIG_ZONE_DEVICE
	page->flags &= ~ZONE_DEVICE_FLAG;
	if (zone == ZONE_DEVICE)
		page->flags |= ZONE_DEVICE_FLAG;
	else
#endif
		page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#ifdef SECTION_IN_PAGE_FLAGS
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(const struct page *page)
{
	return __va(PFN_PHYS(page_to_pfn(page)));
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
#define page_address(page) ((page)->virtual)
#define set_page_address(page, address)		\
	do {					\
		(page)->virtual = (address);	\
	} while(0)
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * the PAGE_MAPPING_ANON bit is set to distinguish it.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	1
#define PAGE_MAPPING_KSM	2
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)

extern struct address_space *page_mapping(struct page *page);

/* Neutral page->mapping pointer to address_space or anon_vma or other */
static inline void *page_rmapping(struct page *page)
{
	return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
}

extern struct address_space *__page_file_mapping(struct page *);

static inline
struct address_space *page_file_mapping(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_mapping(page);

	return page->mapping;
}

static inline int PageAnon(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use ->private
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return page_private(page);
	return page->index;
}

extern pgoff_t __page_file_index(struct page *page);

/*
 * Return the file index of the page. Regular pagecache pages use ->index
 * whereas swapcache pages use swp_offset(->private)
 */
static inline pgoff_t page_file_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_index(page);

	return page->index;
}

/*
 * Return true if this page is mapped into pagetables.
 */
static inline int page_mapped(struct page *page)
{
	return atomic_read(&(page)->_mapcount) >= 0;
}

/*
 * Return true only if the page has been allocated with
 * ALLOC_NO_WATERMARKS and the low watermark was not
 * met implying that the system is under some pressure.
 */
static inline bool page_is_pfmemalloc(struct page *page)
{
	/*
	 * Page index cannot be this large so this must be
	 * a pfmemalloc page.
	 */
	return page->index == -1UL;
}

/*
 * Only to be called by the page allocator on a freshly allocated
 * page.
 */
static inline void set_page_pfmemalloc(struct page *page)
{
	page->index = -1UL;
}

static inline void clear_page_pfmemalloc(struct page *page)
{
	page->index = 0;
}

/*
 * Different kinds of faults, as returned by handle_mm_fault().
 * Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 */

#define VM_FAULT_MINOR	0 /* For backwards compat. Remove me quickly. */

#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
#define VM_FAULT_HWPOISON_LARGE 0x0020	/* Hit poisoned large page. Index encoded in upper bits */
#define VM_FAULT_SIGSEGV 0x0040

#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
#define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */
#define VM_FAULT_FALLBACK 0x0800	/* huge page fault failed, fall back to small */
#define VM_FAULT_DONE_COW 0x1000	/* ->fault has fully handled COW */

#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */

#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
			 VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
			 VM_FAULT_FALLBACK)

#define VM_FAULT_RESULT_TRACE \
	{ VM_FAULT_OOM,			"OOM" }, \
	{ VM_FAULT_SIGBUS,		"SIGBUS" }, \
	{ VM_FAULT_MAJOR,		"MAJOR" }, \
	{ VM_FAULT_WRITE,		"WRITE" }, \
	{ VM_FAULT_HWPOISON,		"HWPOISON" }, \
	{ VM_FAULT_HWPOISON_LARGE,	"HWPOISON_LARGE" }, \
	{ VM_FAULT_SIGSEGV,		"SIGSEGV" }, \
	{ VM_FAULT_NOPAGE,		"NOPAGE" }, \
	{ VM_FAULT_LOCKED,		"LOCKED" }, \
	{ VM_FAULT_RETRY,		"RETRY" }, \
	{ VM_FAULT_FALLBACK,		"FALLBACK" }, \
	{ VM_FAULT_DONE_COW,		"DONE_COW" }

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)

/*
 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
 */
extern void pagefault_out_of_memory(void);

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

/*
 * Flags passed to show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */
#define SHOW_MEM_FILTER_PAGE_COUNT	(0x0002u)	/* page type count */

extern void show_free_areas(unsigned int flags);
extern bool skip_free_areas_node(unsigned int flags, int nid);

int shmem_zero_setup(struct vm_area_struct *);
#ifdef CONFIG_SHMEM
bool shmem_mapping(struct address_space *mapping);
#else
static inline bool shmem_mapping(struct address_space *mapping)
{
	return false;
}
#endif

extern int can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct vm_area_struct *nonlinear_vma;	/* Check page->index if set */
	struct address_space *check_mapping;	/* Check page->mapping if set */
	pgoff_t	first_index;			/* Lowest page->index to unmap */
	pgoff_t last_index;			/* Highest page->index to unmap */
};

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
		pte_t pte);

int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long start, unsigned long end);

/**
 * struct mm_walk - callbacks for walk_page_range
 * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
 * @pte_hole: if set, called for each hole at all levels
 * @hugetlb_entry: if set, called for each hugetlb entry
 * @mm: the mm_struct of the walk target
 * @private: private data for callbacks' usage
 *
 * (see walk_page_range() for more details)
 */
struct mm_walk {
	int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pud_entry)(pud_t *pud, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_entry)(pte_t *pte, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_hole)(unsigned long addr, unsigned long next,
			struct mm_walk *walk);
	int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
			     unsigned long addr, unsigned long next,
			     struct mm_walk *walk);
	struct mm_struct *mm;
	void *private;
};

int walk_page_range(unsigned long addr, unsigned long end,
		struct mm_walk *walk);
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
		struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
		pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
		unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
		void *buf, int len, int write);

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);

#ifdef CONFIG_MMU
extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags);
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long address, unsigned int fault_flags);
#else
static inline int handle_mm_fault(struct vm_area_struct *vma,
		unsigned long address, unsigned int flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct task_struct *tsk,
		struct mm_struct *mm, unsigned long address,
		unsigned int fault_flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return -EFAULT;
}
#endif

extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, int write);

long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		      unsigned long start, unsigned long nr_pages,
		      unsigned int foll_flags, struct page **pages,
		      struct vm_area_struct **vmas, int *nonblocking);
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
		    unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages,
		    struct vm_area_struct **vmas);
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		    unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages,
		    struct vm_area_struct **vmas);
long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
		    unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages,
		    int *locked);
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
		    unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages,
		    unsigned int gup_flags);
long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
		    unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages);
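
/*
 * Usage sketch (illustrative): pinning user pages for I/O with
 * get_user_pages_fast() and dropping the references afterwards;
 * "uaddr", "npages" and "pages" are placeholders of the example:
 *
 *	int i, npinned;
 *
 *	npinned = get_user_pages_fast(uaddr, npages, 1, pages);
 *	if (npinned < 0)
 *		return npinned;
 *	... do the I/O ...
 *	for (i = 0; i < npinned; i++)
 *		put_page(pages[i]);
 */
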
struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
			struct page **pages);
int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);

extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned long offset);
extern void do_invalidatepage_range(struct page *page, unsigned int offset,
				    unsigned int length);

int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_writeback(struct page *page);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);
int get_cmdline(struct task_struct *task, char *buffer, int buflen);

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

#ifdef CONFIG_SHMEM
/*
 * The vma_is_shmem is not inline because it is used only by slow
 * paths in userfault.
 */
bool vma_is_shmem(struct vm_area_struct *vma);
#else
static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
#endif

int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks);
extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end, pgprot_t newprot,
			      int dirty_accountable, int prot_numa);
extern int mprotect_fixup(struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

/*
 * doesn't attempt to fault and will return short.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages);

/*
 * per-process(per-mm_struct) statistics.
 */
static inline atomic_long_t *__get_mm_counter(struct mm_struct *mm, int member)
{
	if (member == MM_SHMEMPAGES)
		return &mm->mm_shmempages;
	else
		return &mm->rss_stat.count[member];
}

static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = atomic_long_read(__get_mm_counter(mm, member));

#ifdef SPLIT_RSS_COUNTING
	/*
	 * counter is updated in asynchronous manner and may go to minus.
	 * But it's never be expected number for users.
	 */
	if (val < 0)
		val = 0;
#endif
	return (unsigned long)val;
}

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	atomic_long_add(value, __get_mm_counter(mm, member));
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_inc(__get_mm_counter(mm, member));
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_dec(__get_mm_counter(mm, member));
}

/* Optimized variant when page is already known not to be PageAnon */
static inline int mm_counter_file(struct page *page)
{
	if (PageSwapBacked(page))
		return MM_SHMEMPAGES;
	return MM_FILEPAGES;
}

static inline int mm_counter(struct page *page)
{
	if (PageAnon(page))
		return MM_ANONPAGES;
	return mm_counter_file(page);
}

static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES) +
		get_mm_counter(mm, MM_SHMEMPAGES);
}

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if ((mm)->hiwater_rss < _rss)
		(mm)->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

	if (*maxrss < hiwater_rss)
		*maxrss = hiwater_rss;
}

#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct mm_struct *mm)
{
}
#endif

#ifndef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t pte)
{
	return 0;
}
#endif

int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			       spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
				    spinlock_t **ptl)
{
	pte_t *ptep;
	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
	return ptep;
}

#ifdef __PAGETABLE_PUD_FOLDED
static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
						unsigned long address)
{
	return 0;
}
#else
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#ifdef __PAGETABLE_PMD_FOLDED
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}
#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
#endif

int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);

/*
 * The following ifdef needed to get the 4level-fixup.h header to work.
 * Remove it when 4level-fixup.h has been removed.
 */
#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
		NULL: pud_offset(pgd, address);
}

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
		NULL: pmd_offset(pud, address);
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */

#if USE_SPLIT_PTE_PTLOCKS
#if BLOATED_SPINLOCKS
void __init ptlock_cache_init(void);
extern bool ptlock_alloc(struct page *page);
extern void ptlock_free(struct page *page);

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return page->ptl;
}
#else /* BLOATED_SPINLOCKS */
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_alloc(struct page *page)
{
	return true;
}

static inline void ptlock_free(struct page *page)
{
}

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return &page->ptl;
}
#endif /* BLOATED_SPINLOCKS */

static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_page(*pmd));
}

static inline bool ptlock_init(struct page *page)
{
	/*
	 * prep_new_page() initialize page->private (and therefore page->ptl)
	 * with 0. Make sure nobody took it in use in between.
	 */
	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
	if (!ptlock_alloc(page))
		return false;
	spin_lock_init(ptlock_ptr(page));
	return true;
}

/* Reset page->mapping so free_pages_check won't complain. */
static inline void pte_lock_deinit(struct page *page)
{
	page->mapping = NULL;
	ptlock_free(page);
}

#else	/* !USE_SPLIT_PTE_PTLOCKS */
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct page *page) { return true; }
static inline void pte_lock_deinit(struct page *page) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */

static inline void pgtable_init(void)
{
	ptlock_cache_init();
	pgtable_cache_init();
}

static inline bool pgtable_page_ctor(struct page *page)
{
	inc_zone_page_state(page, NR_PAGETABLE);
	return ptlock_init(page);
}

static inline void pgtable_page_dtor(struct page *page)
{
	pte_lock_deinit(page);
	dec_zone_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)
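
/*
 * Typical locking pattern (illustrative; assumes mm, pmd and addr are
 * in scope): map and lock the PTE, inspect it, then unlock and unmap
 * in one step:
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *
 *	if (pte_present(*pte))
 *		... operate on the entry ...
 *	pte_unmap_unlock(pte, ptl);
 */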

#define pte_alloc_map(mm, vma, pmd, address)				\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma,		\
							pmd, address))?	\
	 NULL: pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)			\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL,		\
							pmd, address))?	\
		NULL: pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)			\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
		NULL: pte_offset_kernel(pmd, address))

#if USE_SPLIT_PMD_PTLOCKS

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(virt_to_page(pmd));
}

static inline bool pgtable_pmd_page_ctor(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	page->pmd_huge_pte = NULL;
#endif
	return ptlock_init(page);
}

static inline void pgtable_pmd_page_dtor(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
#endif
	ptlock_free(page);
}

#define pmd_huge_pte(mm, pmd) (virt_to_page(pmd)->pmd_huge_pte)

#else

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}

static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
static inline void pgtable_pmd_page_dtor(struct page *page) {}

#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)

#endif

static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl = pmd_lockptr(mm, pmd);
	spin_lock(ptl);
	return ptl;
}

/*
 * No scalability reason to split PUD locks yet, but follow the same
 * pattern as the PMD locks to make it easier if we decide to.  The
 * VM should not be considered ready to switch to split PUD locks yet;
 * there may be places which need to be converted from page_table_lock.
 */
static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
{
	return &mm->page_table_lock;
}

static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
{
	spinlock_t *ptl = pud_lockptr(mm, pud);

	spin_lock(ptl);
	return ptl;
}

extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, unsigned long * zones_size,
		unsigned long zone_start_pfn, unsigned long *zholes_size);
extern void free_initmem(void);

/*
 * Free (all or part of) the reserved memory between start (inclusive)
 * and end (exclusive); if "poison" is within [0, UCHAR_MAX], the freed
 * memory is filled with that pattern first.  Returns the number of
 * pages freed into the buddy system.
 */
extern unsigned long free_reserved_area(unsigned long start, unsigned long end,
					int poison, char *s);

#ifdef	CONFIG_HIGHMEM
/*
 * Free a highmem page into the buddy system, adjusting totalhigh_pages
 * and totalram_pages.
 */
extern void free_highmem_page(struct page *page);
#endif

extern void adjust_managed_page_count(struct page *page, long count);
extern void mem_init_print_info(const char *str);

extern void reserve_bootmem_region(unsigned long start, unsigned long end);

/* Free the reserved page into the buddy system, so it gets managed. */
static inline void __free_reserved_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}

static inline void free_reserved_page(struct page *page)
{
	__free_reserved_page(page);
	adjust_managed_page_count(page, 1);
}

static inline void mark_page_reserved(struct page *page)
{
	SetPageReserved(page);
	adjust_managed_page_count(page, -1);
}

/*
 * Default method to free all the __init memory into the buddy system.
 * The freed pages will be poisoned with pattern "poison" if it's within
 * range [0, UCHAR_MAX].
 * Return pages freed into the buddy system.
 */
static inline unsigned long free_initmem_default(int poison)
{
	extern char __init_begin[], __init_end[];

	return free_reserved_area(PAGE_ALIGN((unsigned long)&__init_begin),
				  ((unsigned long)&__init_end) & PAGE_MASK,
				  poison, "unused kernel");
}

static inline unsigned long get_num_physpages(void)
{
	int nid;
	unsigned long phys_pages = 0;

	for_each_online_node(nid)
		phys_pages += node_present_pages(nid);

	return phys_pages;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise
 * its node and zone memory maps by registering its physical memory ranges
 * with memblock and then calling free_area_init_nodes() with the maximum
 * PFN of each zone; the core computes per-node extents and holes itself.
 */
extern void free_area_init_nodes(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
						unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
						unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
						unsigned long max_low_pfn);
extern void sparse_memory_present_with_active_regions(int nid);

#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
    !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
static inline int __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state)
{
	return 0;
}
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
/* there is a per-arch backend function. */
extern int __meminit __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state);
#endif

extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long,
				unsigned long, enum memmap_context);
extern void setup_per_zone_wmarks(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
extern void show_mem(unsigned int flags);
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);

extern __printf(3, 4)
void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);

extern void setup_per_cpu_pageset(void);

extern void zone_pcp_update(struct zone *zone);
extern void zone_pcp_reset(struct zone *zone);

/* page_alloc.c */
extern int min_free_kbytes;

/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);

/* interval_tree.c */
void vma_interval_tree_insert(struct vm_area_struct *node,
			      struct rb_root *root);
void vma_interval_tree_insert_after(struct vm_area_struct *node,
				    struct vm_area_struct *prev,
				    struct rb_root *root);
void vma_interval_tree_remove(struct vm_area_struct *node,
			      struct rb_root *root);
struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root,
				unsigned long start, unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
				unsigned long start, unsigned long last);

#define vma_interval_tree_foreach(vma, root, start, last)		\
	for (vma = vma_interval_tree_iter_first(root, start, last);	\
	     vma; vma = vma_interval_tree_iter_next(vma, start, last))
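
/*
 * Illustrative sketch: rmap code visits every VMA mapping a range of a
 * file this way ("mapping" is the file's address_space; pgoff and
 * pgoff_end are assumed page offsets of the example):
 *
 *	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff_end) {
 *		... operate on vma ...
 *	}
 */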

static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
					struct list_head *list)
{
	list_add_tail(&vma->shared.nonlinear, list);
}

void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
				   struct rb_root *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
				   struct rb_root *root);
struct anon_vma_chain *anon_vma_interval_tree_iter_first(
	struct rb_root *root, unsigned long start, unsigned long last);
struct anon_vma_chain *anon_vma_interval_tree_iter_next(
	struct anon_vma_chain *node, unsigned long start, unsigned long last);
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
#endif

#define anon_vma_interval_tree_foreach(avc, root, start, last)		 \
	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))

/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
	struct vm_area_struct *expand);
static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
{
	return __vma_adjust(vma, start, end, pgoff, insert, NULL);
}
extern struct vm_area_struct *vma_merge(struct mm_struct *,
	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
	struct mempolicy *, struct vm_userfaultfd_ctx);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int split_vma(struct mm_struct *,
	struct vm_area_struct *, unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
	struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);

static inline int check_data_rlimit(unsigned long rlim,
				    unsigned long new,
				    unsigned long start,
				    unsigned long end_data,
				    unsigned long start_data)
{
	if (rlim < RLIM_INFINITY) {
		if (((new - start) + (end_data - start_data)) > rlim)
			return -ENOSPC;
	}

	return 0;
}
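
/*
 * Worked example: with rlim = 8 MB, growing the break from "start" to
 * "new" fails with -ENOSPC once (new - start) plus the size of the
 * existing data segment (end_data - start_data) would exceed 8 MB.
 */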

extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);

extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);
extern struct file *get_task_exe_file(struct task_struct *task);

extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
extern int install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags, struct page **pages);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long mmap_region(struct file *file, unsigned long addr,
	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
	struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
	struct list_head *uf);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
		     struct list_head *uf);

static inline unsigned long
do_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	unsigned long pgoff, unsigned long *populate, struct list_head *uf)
{
	return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf);
}

#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
			 int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
{
	/* Ignore errors */
	(void) __mm_populate(addr, len, 1);
}
#else
static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif

/* These take the mm semaphore themselves */
extern unsigned long vm_brk(unsigned long, unsigned long);
extern unsigned long vm_brk_flags(unsigned long, unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long vm_mmap(struct file *, unsigned long,
	unsigned long, unsigned long,
	unsigned long, unsigned long);

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
};

extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);

/*
 * Search for an unmapped address range.
 *
 * We are looking for a range that:
 * - does not intersect with any VMA;
 * - is contained within the [low_limit, high_limit) interval;
 * - is at least the desired size.
 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
 */
static inline unsigned long
vm_unmapped_area(struct vm_unmapped_area_info *info)
{
	if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
		return unmapped_area(info);
	else
		return unmapped_area_topdown(info);
}
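
/*
 * Illustrative sketch of a bottom-up arch_get_unmapped_area()
 * implementation (info.flags = 0 selects the bottom-up search; the
 * limits shown are assumptions of the example):
 *
 *	struct vm_unmapped_area_info info;
 *
 *	info.flags = 0;
 *	info.length = len;
 *	info.low_limit = mm->mmap_base;
 *	info.high_limit = TASK_SIZE;
 *	info.align_mask = 0;
 *	info.align_offset = 0;
 *	return vm_unmapped_area(&info);
 */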
2115
2116
2117extern void truncate_inode_pages(struct address_space *, loff_t);
2118extern void truncate_inode_pages_range(struct address_space *,
2119 loff_t lstart, loff_t lend);
2120extern void truncate_inode_pages_final(struct address_space *);
2121
2122
2123extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
2124extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
2125
2126
2127int write_one_page(struct page *page, int wait);
2128void task_dirty_inc(struct task_struct *tsk);
2129
2130
2131#define VM_MAX_READAHEAD 128
2132#define VM_MIN_READAHEAD 16
2133
2134int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
2135 pgoff_t offset, unsigned long nr_to_read);
2136
2137void page_cache_sync_readahead(struct address_space *mapping,
2138 struct file_ra_state *ra,
2139 struct file *filp,
2140 pgoff_t offset,
2141 unsigned long size);
2142
2143void page_cache_async_readahead(struct address_space *mapping,
2144 struct file_ra_state *ra,
2145 struct file *filp,
2146 struct page *pg,
2147 pgoff_t offset,
2148 unsigned long size);
2149
2150unsigned long ra_submit(struct file_ra_state *ra,
2151 struct address_space *mapping,
2152 struct file *filp);

extern unsigned long stack_guard_gap;

/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);

/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
extern int expand_downwards(struct vm_area_struct *vma,
			    unsigned long address);
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
  #define expand_upwards(vma, address) do { } while (0)
#endif

/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr);
extern struct vm_area_struct *find_vma_prev(struct mm_struct *mm, unsigned long addr,
					    struct vm_area_struct **pprev);

/*
 * Look up the first VMA which intersects the interval
 * [start_addr, end_addr); NULL if none. Assumes start_addr < end_addr.
 */
static inline struct vm_area_struct *
find_vma_intersection(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr)
{
	struct vm_area_struct *vma = find_vma(mm, start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}
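
/*
 * Example (sketch): the find_vma*() helpers walk mm's VMA tree, so the
 * caller must hold mmap_sem; e.g. to check whether a range is mapped:
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma_intersection(mm, start, start + len);
 *	if (vma)
 *		pr_debug("range overlaps [%lx,%lx)\n",
 *			 vma->vm_start, vma->vm_end);
 *	up_read(&mm->mmap_sem);
 */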

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		/* Clamp to 0 if the subtraction wrapped around. */
		if (vm_start > vma->vm_start)
			vm_start = 0;
	}
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		/* Clamp to the topmost page if the addition overflowed. */
		if (vm_end < vma->vm_end)
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}
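
/*
 * Worked example (assuming 4K pages and the default stack_guard_gap of
 * 256 pages, i.e. 1MB; the default lives in mm/mmap.c and is tunable via
 * the "stack_guard_gap" boot parameter): a VM_GROWSDOWN stack at
 * [0x7f0000100000, 0x7f0000200000) yields
 * vm_start_gap() == 0x7f0000000000, so a neighbouring mapping may end no
 * later than that address without eating into the guard gap.
 */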

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
				unsigned long vm_start, unsigned long vm_end)
{
	struct vm_area_struct *vma = find_vma(mm, vm_start);

	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
		vma = NULL;

	return vma;
}

#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
#else
static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(0);
}
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
#endif

#ifdef CONFIG_NUMA_BALANCING
unsigned long change_prot_numa(struct vm_area_struct *vma,
			       unsigned long start, unsigned long end);
#endif

struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
		    unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
		  unsigned long pfn);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
		    pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);

struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int foll_flags,
			      unsigned int *page_mask);

static inline struct page *follow_page(struct vm_area_struct *vma,
		unsigned long address, unsigned int foll_flags)
{
	unsigned int unused_page_mask;
	return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
}
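
/*
 * Example (sketch): a caller that wants to hold on to the page it looked
 * up should pass FOLL_GET and drop the reference when done; mmap_sem must
 * be held here as well:
 *
 *	struct page *page;
 *
 *	page = follow_page(vma, address, FOLL_GET | FOLL_DUMP);
 *	if (!IS_ERR_OR_NULL(page)) {
 *		// ... inspect the page ...
 *		put_page(page);
 *	}
 */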

#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_TOUCH	0x02	/* mark page accessed */
#define FOLL_GET	0x04	/* do get_page on page */
#define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
#define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
#define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
				 * and return without waiting upon it */
#define FOLL_MLOCK	0x40	/* mark page as mlocked */
#define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
#define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
#define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
#define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
#define FOLL_TRIED	0x800	/* a retry, previous pass started an IO */
#define FOLL_REMOTE	0x2000	/* we are working on non-current tsk/mm */
#define FOLL_COW	0x4000	/* internal GUP flag */

typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
			void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
			       unsigned long size, pte_fn_t fn, void *data);
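
/*
 * Example (sketch): apply_to_page_range() walks (allocating as needed) the
 * page tables over [address, address + size) and invokes the callback on
 * each PTE. A hypothetical callback that counts populated entries via the
 * "data" cookie could look like:
 *
 *	static int count_pte(pte_t *pte, pgtable_t token,
 *			     unsigned long addr, void *data)
 *	{
 *		unsigned long *count = data;	// hypothetical accumulator
 *
 *		if (!pte_none(*pte))
 *			(*count)++;
 *		return 0;			// non-zero aborts the walk
 *	}
 *
 *	unsigned long count = 0;
 *	err = apply_to_page_range(mm, start, nr * PAGE_SIZE,
 *				  count_pte, &count);
 */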

#ifdef CONFIG_PROC_FS
void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
#else
static inline void vm_stat_account(struct mm_struct *mm,
			unsigned long flags, struct file *file, long pages)
{
	mm->total_vm += pages;
}
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
extern bool _debug_pagealloc_enabled;
extern void __kernel_map_pages(struct page *page, int numpages, int enable);

static inline bool debug_pagealloc_enabled(void)
{
	return _debug_pagealloc_enabled;
}

static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!debug_pagealloc_enabled())
		return;

	__kernel_map_pages(page, numpages, enable);
}
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif /* CONFIG_HIBERNATION */
#else /* CONFIG_DEBUG_PAGEALLOC */
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
#endif /* CONFIG_HIBERNATION */
static inline bool debug_pagealloc_enabled(void)
{
	return false;
}
#endif /* CONFIG_DEBUG_PAGEALLOC */
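
/*
 * Example (sketch): with CONFIG_DEBUG_PAGEALLOC the page allocator unmaps
 * pages on free and remaps them on allocation, so stray accesses fault:
 *
 *	kernel_map_pages(page, 1 << order, 0);	// free path: unmap
 *	...
 *	kernel_map_pages(page, 1 << order, 1);	// alloc path: map back
 *
 * Without the config option both calls compile to no-ops, as above.
 */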

extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
#ifdef __HAVE_ARCH_GATE_AREA
int in_gate_area_no_mm(unsigned long addr);
int in_gate_area(struct mm_struct *mm, unsigned long addr);
#else
int in_gate_area_no_mm(unsigned long addr);
#define in_gate_area(mm, addr) ({(void)mm; in_gate_area_no_mm(addr);})
#endif /* __HAVE_ARCH_GATE_AREA */

#ifdef CONFIG_SYSCTL
extern int sysctl_drop_caches;
int drop_caches_sysctl_handler(struct ctl_table *, int,
			       void __user *, size_t *, loff_t *);
#endif

unsigned long shrink_slab(struct shrink_control *shrink,
			  unsigned long nr_pages_scanned,
			  unsigned long lru_pages);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char *arch_vma_name(struct vm_area_struct *vma);
void print_vma_addr(char *prefix, unsigned long rip);

void sparse_mem_maps_populate_node(struct page **map_map,
				   unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count,
				   int nodeid);

struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
struct vmem_altmap;
void *__vmemmap_alloc_block_buf(unsigned long size, int node,
				struct vmem_altmap *altmap);
static inline void *vmemmap_alloc_block_buf(unsigned long size, int node)
{
	return __vmemmap_alloc_block_buf(size, node, NULL);
}

void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
			       int node);
int vmemmap_populate(unsigned long start, unsigned long end, int node);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end);
#endif
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
				  unsigned long size);

enum mf_flags {
	MF_COUNT_INCREASED = 1 << 0,
	MF_ACTION_REQUIRED = 1 << 1,
	MF_MUST_KILL = 1 << 2,
	MF_SOFT_OFFLINE = 1 << 3,
};
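
/*
 * Example (sketch): a machine-check handler that knows userspace is about
 * to consume the bad data would flag the failure as action-required:
 *
 *	ret = memory_failure(pfn, trapno, MF_ACTION_REQUIRED);
 *
 * whereas asynchronous (patrol-scrub style) reports can be queued from
 * contexts that cannot sleep:
 *
 *	memory_failure_queue(pfn, trapno, 0);
 */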
extern int memory_failure(unsigned long pfn, int trapno, int flags);
extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int get_hwpoison_page(struct page *page);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t num_poisoned_pages;
extern int soft_offline_page(struct page *page, int flags);

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
			    unsigned long addr,
			    unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src,
				unsigned long addr, struct vm_area_struct *vma,
				unsigned int pages_per_huge_page);
extern long copy_huge_page_from_user(struct page *dst_page,
				     const void __user *usr_src,
				     unsigned int pages_per_huge_page,
				     bool allow_pagefault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

extern struct page_ext_operations debug_guardpage_ops;
extern struct page_ext_operations page_poisoning_ops;

#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
extern bool _debug_guardpage_enabled;

static inline unsigned int debug_guardpage_minorder(void)
{
	return _debug_guardpage_minorder;
}

static inline bool debug_guardpage_enabled(void)
{
	return _debug_guardpage_enabled;
}

static inline bool page_is_guard(struct page *page)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return false;

	/* lookup_page_ext() can return NULL if page_ext is not yet ready. */
	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return false;

	return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool debug_guardpage_enabled(void) { return false; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif /* CONFIG_DEBUG_PAGEALLOC */

#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else
static inline void setup_nr_node_ids(void) {}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */