#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/prio_tree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>

struct mempolicy;
struct anon_vma;
struct file_ra_state;
struct user_struct;
struct writeback_control;

#ifndef CONFIG_DISCONTIGMEM
extern unsigned long max_mapnr;
#endif

extern unsigned long num_physpages;
extern unsigned long totalram_pages;
extern void * high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

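/* to align the pointer to the (next) page boundary */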
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

extern struct kmem_cache *vm_area_cachep;

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

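/*
 * vm_flags in vm_area_struct, see mm_types.h.
 */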
#define VM_READ 0x00000001
#define VM_WRITE 0x00000002
#define VM_EXEC 0x00000004
#define VM_SHARED 0x00000008

#define VM_MAYREAD 0x00000010
#define VM_MAYWRITE 0x00000020
#define VM_MAYEXEC 0x00000040
#define VM_MAYSHARE 0x00000080

#define VM_GROWSDOWN 0x00000100
#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
#define VM_GROWSUP 0x00000200
#else
#define VM_GROWSUP 0x00000000
#define VM_NOHUGEPAGE 0x00000200
#endif
#define VM_PFNMAP 0x00000400
#define VM_DENYWRITE 0x00000800

#define VM_EXECUTABLE 0x00001000
#define VM_LOCKED 0x00002000
#define VM_IO 0x00004000

#define VM_SEQ_READ 0x00008000
#define VM_RAND_READ 0x00010000

#define VM_DONTCOPY 0x00020000
#define VM_DONTEXPAND 0x00040000
#define VM_RESERVED 0x00080000
#define VM_ACCOUNT 0x00100000
#define VM_NORESERVE 0x00200000
#define VM_HUGETLB 0x00400000
#define VM_NONLINEAR 0x00800000
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define VM_MAPPED_COPY 0x01000000
#else
#define VM_HUGEPAGE 0x01000000
#endif
#define VM_INSERTPAGE 0x02000000
#define VM_NODUMP 0x04000000

#define VM_CAN_NONLINEAR 0x08000000
#define VM_MIXEDMAP 0x10000000
#define VM_SAO 0x20000000
#define VM_PFN_AT_MMAP 0x40000000
#define VM_MERGEABLE 0x80000000

#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ)

#ifndef VM_STACK_DEFAULT_FLAGS
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#else
#define VM_STACK_FLAGS (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif

#define VM_READHINTMASK (VM_SEQ_READ | VM_RAND_READ)
#define VM_ClearReadHint(v) (v)->vm_flags &= ~VM_READHINTMASK
#define VM_NormalReadHint(v) (!((v)->vm_flags & VM_READHINTMASK))
#define VM_SequentialReadHint(v) ((v)->vm_flags & VM_SEQ_READ)
#define VM_RandomReadHint(v) ((v)->vm_flags & VM_RAND_READ)

#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)

extern pgprot_t protection_map[16];

#define FAULT_FLAG_WRITE 0x01
#define FAULT_FLAG_NONLINEAR 0x02
#define FAULT_FLAG_MKWRITE 0x04
#define FAULT_FLAG_ALLOW_RETRY 0x08
#define FAULT_FLAG_RETRY_NOWAIT 0x10
#define FAULT_FLAG_KILLABLE 0x20

static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & VM_PFN_AT_MMAP);
}

static inline int is_pfn_mapping(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & VM_PFNMAP);
}

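/*
 * vm_fault is filled in by the pagefault handler and passed to the vma's
 * ->fault callback: pgoff and virtual_address describe the faulting
 * address, flags holds FAULT_FLAG_xxx bits, and the handler returns the
 * faulted-in page through ->page unless VM_FAULT_NOPAGE (or an error) is
 * returned.
 */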
struct vm_fault {
	unsigned int flags;
	pgoff_t pgoff;
	void __user *virtual_address;

	struct page *page;
};

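/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */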
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);

	int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);

	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);
#ifdef CONFIG_NUMA
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
	int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
		       const nodemask_t *to, unsigned long flags);
#endif
};

struct mmu_gather;
struct inode;

#define page_private(page) ((page)->private)
#define set_page_private(page, v) ((page)->private = (v))

#include <linux/page-flags.h>
#include <linux/huge_mm.h>

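/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users).
 */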
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON(atomic_read(&page->_count) == 0);
	return atomic_dec_and_test(&page->_count);
}

static inline int get_page_unless_zero(struct page *page)
{
	return atomic_inc_not_zero(&page->_count);
}

extern int page_is_ram(unsigned long pfn);

struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

static inline int is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return 0;
#endif
}
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

static inline void compound_lock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	bit_spin_lock(PG_compound_lock, &page->flags);
#endif
}

static inline void compound_unlock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	bit_spin_unlock(PG_compound_lock, &page->flags);
#endif
}

static inline unsigned long compound_lock_irqsave(struct page *page)
{
	unsigned long uninitialized_var(flags);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	local_irq_save(flags);
	compound_lock(page);
#endif
	return flags;
}

static inline void compound_unlock_irqrestore(struct page *page,
					      unsigned long flags)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	compound_unlock(page);
	local_irq_restore(flags);
#endif
}

static inline struct page *compound_head(struct page *page)
{
	if (unlikely(PageTail(page)))
		return page->first_page;
	return page;
}

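/*
 * The atomic page->_mapcount starts from -1, so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */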
static inline void reset_page_mapcount(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

static inline int page_mapcount(struct page *page)
{
	return atomic_read(&(page)->_mapcount) + 1;
}

static inline int page_count(struct page *page)
{
	return atomic_read(&compound_head(page)->_count);
}

static inline void get_huge_page_tail(struct page *page)
{
	VM_BUG_ON(page_mapcount(page) < 0);
	VM_BUG_ON(atomic_read(&page->_count) != 0);
	atomic_inc(&page->_mapcount);
}

extern bool __get_page_tail(struct page *page);

static inline void get_page(struct page *page)
{
	if (unlikely(PageTail(page)))
		if (likely(__get_page_tail(page)))
			return;

	VM_BUG_ON(atomic_read(&page->_count) <= 0);
	atomic_inc(&page->_count);
}

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);
	return compound_head(page);
}

static inline void init_page_count(struct page *page)
{
	atomic_set(&page->_count, 1);
}

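/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).  A _mapcount value well away from -1 is used to
 * mark such pages, so that a mapcount underflow cannot be mistaken for a
 * genuine buddy page.
 */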
#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)

static inline int PageBuddy(struct page *page)
{
	return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
}

static inline void __SetPageBuddy(struct page *page)
{
	VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
}

static inline void __ClearPageBuddy(struct page *page)
{
	VM_BUG_ON(!PageBuddy(page));
	atomic_set(&page->_mapcount, -1);
}

void put_page(struct page *page);
void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);
int split_free_page(struct page *page);

typedef void compound_page_dtor(struct page *);

static inline void set_compound_page_dtor(struct page *page,
					  compound_page_dtor *dtor)
{
	page[1].lru.next = (void *)dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	return (compound_page_dtor *)page[1].lru.next;
}

static inline int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return (unsigned long)page[1].lru.prev;
}

static inline int compound_trans_order(struct page *page)
{
	int order;
	unsigned long flags;

	if (!PageHead(page))
		return 0;

	flags = compound_lock_irqsave(page);
	order = compound_order(page);
	compound_unlock_irqrestore(page, flags);
	return order;
}

static inline void set_compound_order(struct page *page, unsigned long order)
{
	page[1].lru.prev = (void *)order;
}

#ifdef CONFIG_MMU
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}
#endif

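/*
 * The zone, node and (for SPARSEMEM without vmemmap) section of a page
 * are encoded in the upper bits of page->flags, above the page flag bits
 * themselves.  The *_WIDTH, *_PGOFF and *_PGSHIFT macros below describe
 * that layout.
 */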
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTIONS_WIDTH SECTIONS_SHIFT
#else
#define SECTIONS_WIDTH 0
#endif

#define ZONES_WIDTH ZONES_SHIFT

#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
#define NODES_WIDTH NODES_SHIFT
#else
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#error "Vmemmap: No space for nodes field in page flags"
#endif
#define NODES_WIDTH 0
#endif

#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)

#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
#define NODE_NOT_IN_PAGE_FLAGS
#endif

#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))

#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF)? \
			SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF)? \
			NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
	return zone->node;
#else
	return 0;
#endif
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
				  unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}

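/* Some inline functions in vmstat.h depend on page_zone() */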
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(const struct page *page)
{
	return __va(PFN_PHYS(page_to_pfn(page)));
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
#define page_address(page) ((page)->virtual)
#define set_page_address(page, address) \
	do { \
		(page)->virtual = (address); \
	} while(0)
#define page_address_init() do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address) do { } while(0)
#define page_address_init() do { } while(0)
#endif

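/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * the PAGE_MAPPING_ANON bit is set in that pointer to distinguish it.
 * KSM pages set PAGE_MAPPING_KSM in addition to PAGE_MAPPING_ANON.
 */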
#define PAGE_MAPPING_ANON 1
#define PAGE_MAPPING_KSM 2
#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)

extern struct address_space swapper_space;
static inline struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping = page->mapping;

	VM_BUG_ON(PageSlab(page));
	if (unlikely(PageSwapCache(page)))
		mapping = &swapper_space;
	else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		mapping = NULL;
	return mapping;
}

static inline void *page_rmapping(struct page *page)
{
	return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
}

static inline int PageAnon(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return page_private(page);
	return page->index;
}

static inline int page_mapped(struct page *page)
{
	return atomic_read(&(page)->_mapcount) >= 0;
}

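/*
 * Different kinds of faults, as returned by handle_mm_fault().
 * Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 */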
#define VM_FAULT_MINOR 0

#define VM_FAULT_OOM 0x0001
#define VM_FAULT_SIGBUS 0x0002
#define VM_FAULT_MAJOR 0x0004
#define VM_FAULT_WRITE 0x0008
#define VM_FAULT_HWPOISON 0x0010
#define VM_FAULT_HWPOISON_LARGE 0x0020

#define VM_FAULT_NOPAGE 0x0100
#define VM_FAULT_LOCKED 0x0200
#define VM_FAULT_RETRY 0x0400

#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000

#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
			VM_FAULT_HWPOISON_LARGE)

#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)

extern void pagefault_out_of_memory(void);

#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)

#define SHOW_MEM_FILTER_NODES (0x0001u)

extern void show_free_areas(unsigned int flags);
extern bool skip_free_areas_node(unsigned int flags, int nid);

int shmem_lock(struct file *file, int lock, struct user_struct *user);
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
int shmem_zero_setup(struct vm_area_struct *);

extern int can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

struct zap_details {
	struct vm_area_struct *nonlinear_vma;
	struct address_space *check_mapping;
	pgoff_t first_index;
	pgoff_t last_index;
};

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			    pte_t pte);

int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		 unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
		    unsigned long size, struct zap_details *);
void unmap_vmas(struct mmu_gather *tlb,
		struct vm_area_struct *start_vma, unsigned long start_addr,
		unsigned long end_addr, unsigned long *nr_accounted,
		struct zap_details *);

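/*
 * struct mm_walk - callbacks used by walk_page_range().  Each p?d_entry
 * callback is invoked for entries found at that page table level,
 * pte_hole for ranges with no mapping, and hugetlb_entry for hugetlb
 * entries; mm is the mm_struct being walked and private carries opaque
 * caller data through to the callbacks.
 */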
struct mm_walk {
	int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
	int (*hugetlb_entry)(pte_t *, unsigned long,
			     unsigned long, unsigned long, struct mm_walk *);
	struct mm_struct *mm;
	void *private;
};

int walk_page_range(unsigned long addr, unsigned long end,
		    struct mm_walk *walk);
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		    unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
		    struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
			 loff_t const holebegin, loff_t const holelen, int even_cows);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	       unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write);

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
extern int vmtruncate(struct inode *inode, loff_t offset);
extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);

int invalidate_inode_page(struct page *page);

#ifdef CONFIG_MMU
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			   unsigned long address, unsigned int flags);
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long address, unsigned int fault_flags);
#else
static inline int handle_mm_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct task_struct *tsk,
			struct mm_struct *mm, unsigned long address,
			unsigned int fault_flags)
{
	BUG();
	return -EFAULT;
}
#endif

extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, int write);

int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long start, int len, unsigned int foll_flags,
		     struct page **pages, struct vm_area_struct **vmas,
		     int *nonblocking);
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		   unsigned long start, int nr_pages, int write, int force,
		   struct page **pages, struct vm_area_struct **vmas);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages);
struct page *get_dump_page(unsigned long addr);

extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned long offset);

int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
			       struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_writeback(struct page *page);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);

static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
}

static inline int stack_guard_page_start(struct vm_area_struct *vma,
					 unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSDOWN) &&
		(vma->vm_start == addr) &&
		!vma_growsdown(vma->vm_prev, addr);
}

static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
}

static inline int stack_guard_page_end(struct vm_area_struct *vma,
				       unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSUP) &&
		(vma->vm_end == addr) &&
		!vma_growsup(vma->vm_next, addr);
}

extern pid_t
vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len);
extern unsigned long do_mremap(unsigned long addr,
			       unsigned long old_len, unsigned long new_len,
			       unsigned long flags, unsigned long new_addr);
extern int mprotect_fixup(struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages);

static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = atomic_long_read(&mm->rss_stat.count[member]);

#ifdef SPLIT_RSS_COUNTING
	if (val < 0)
		val = 0;
#endif
	return (unsigned long)val;
}

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	atomic_long_add(value, &mm->rss_stat.count[member]);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_inc(&mm->rss_stat.count[member]);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_dec(&mm->rss_stat.count[member]);
}

static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES);
}

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if ((mm)->hiwater_rss < _rss)
		(mm)->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

	if (*maxrss < hiwater_rss)
		*maxrss = hiwater_rss;
}

#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct mm_struct *mm)
{
}
#endif

int vma_wants_writenotify(struct vm_area_struct *vma);

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			       spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
				    spinlock_t **ptl)
{
	pte_t *ptep;
	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
	return ptep;
}

#ifdef __PAGETABLE_PUD_FOLDED
static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
			      unsigned long address)
{
	return 0;
}
#else
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#ifdef __PAGETABLE_PMD_FOLDED
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
			      unsigned long address)
{
	return 0;
}
#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
#endif

int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);

#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
		NULL: pud_offset(pgd, address);
}

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
		NULL: pmd_offset(pud, address);
}
#endif

#if USE_SPLIT_PTLOCKS

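/*
 * With split page table locks, each page table page carries its own
 * spinlock in its struct page (->ptl), instead of every pte page of the
 * mm sharing mm->page_table_lock.
 */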
#define __pte_lockptr(page) &((page)->ptl)
#define pte_lock_init(_page) do { \
	spin_lock_init(__pte_lockptr(_page)); \
} while (0)
#define pte_lock_deinit(page) ((page)->mapping = NULL)
#define pte_lockptr(mm, pmd) ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
#else
#define pte_lock_init(page) do {} while (0)
#define pte_lock_deinit(page) do {} while (0)
#define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;})
#endif

static inline void pgtable_page_ctor(struct page *page)
{
	pte_lock_init(page);
	inc_zone_page_state(page, NR_PAGETABLE);
}

static inline void pgtable_page_dtor(struct page *page)
{
	pte_lock_deinit(page);
	dec_zone_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl) do {		\
	spin_unlock(ptl);			\
	pte_unmap(pte);				\
} while (0)

#define pte_alloc_map(mm, vma, pmd, address)			\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma,	\
						    pmd, address))?	\
		NULL: pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)		\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL,	\
						    pmd, address))?	\
		NULL: pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)					\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
		NULL: pte_offset_kernel(pmd, address))

extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, unsigned long * zones_size,
		unsigned long zone_start_pfn, unsigned long *zholes_size);
extern void free_initmem(void);

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP

extern void free_area_init_nodes(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
				      unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
					   unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
					     unsigned long max_low_pfn);
extern void sparse_memory_present_with_active_regions(int nid);

#endif

#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
    !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
static inline int __early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#else
extern int __meminit early_pfn_to_nid(unsigned long pfn);
#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
extern int __meminit __early_pfn_to_nid(unsigned long pfn);
#endif
#endif

extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long,
				unsigned long, enum memmap_context);
extern void setup_per_zone_wmarks(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
extern void show_mem(unsigned int flags);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
extern int after_bootmem;

extern __printf(3, 4)
void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);

extern void setup_per_cpu_pageset(void);

extern void zone_pcp_update(struct zone *zone);

extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);

void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *);
void vma_prio_tree_remove(struct vm_area_struct *, struct prio_tree_root *);
struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma,
					  struct prio_tree_iter *iter);

#define vma_prio_tree_foreach(vma, iter, root, begin, end)	\
	for (prio_tree_iter_init(iter, root, begin, end), vma = NULL;	\
		(vma = vma_prio_tree_next(vma, iter)); )

static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
					struct list_head *list)
{
	vma->shared.vm_set.parent = NULL;
	list_add_tail(&vma->shared.vm_set.list, list);
}

extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
extern struct vm_area_struct *vma_merge(struct mm_struct *,
	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
	struct mempolicy *);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int split_vma(struct mm_struct *,
	struct vm_area_struct *, unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
	struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
	unsigned long addr, unsigned long len, pgoff_t pgoff);
extern void exit_mmap(struct mm_struct *);

extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);

extern void added_exe_file_vma(struct mm_struct *mm);
extern void removed_exe_file_vma(struct mm_struct *mm);
extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);

extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
extern int install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags, struct page **pages);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long mmap_region(struct file *file, unsigned long addr,
	unsigned long len, unsigned long flags,
	vm_flags_t vm_flags, unsigned long pgoff);
extern unsigned long do_mmap(struct file *, unsigned long,
	unsigned long, unsigned long,
	unsigned long, unsigned long);
extern int do_munmap(struct mm_struct *, unsigned long, size_t);

extern unsigned long vm_brk(unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long vm_mmap(struct file *, unsigned long,
	unsigned long, unsigned long,
	unsigned long, unsigned long);

extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
				       loff_t lstart, loff_t lend);

extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);

int write_one_page(struct page *page, int wait);
void task_dirty_inc(struct task_struct *tsk);

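/* readahead window sizes, in kbytes */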
#define VM_MAX_READAHEAD 128
#define VM_MIN_READAHEAD 16

int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read);

void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra,
			       struct file *filp,
			       pgoff_t offset,
			       unsigned long size);

void page_cache_async_readahead(struct address_space *mapping,
				struct file_ra_state *ra,
				struct file *filp,
				struct page *pg,
				pgoff_t offset,
				unsigned long size);

unsigned long max_sane_readahead(unsigned long nr);
unsigned long ra_submit(struct file_ra_state *ra,
			struct address_space *mapping,
			struct file *filp);

extern int expand_stack(struct vm_area_struct *vma, unsigned long address);

extern int expand_downwards(struct vm_area_struct *vma,
		unsigned long address);
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
  #define expand_upwards(vma, address) do { } while (0)
#endif

extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
					     struct vm_area_struct **pprev);

static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma = find_vma(mm,start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
				unsigned long vm_start, unsigned long vm_end)
{
	struct vm_area_struct *vma = find_vma(mm, vm_start);

	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
		vma = NULL;

	return vma;
}

#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
#else
static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(0);
}
#endif

struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);

struct page *follow_page(struct vm_area_struct *, unsigned long address,
			unsigned int foll_flags);
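
/* foll_flags for follow_page() and the __get_user_pages() family */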
#define FOLL_WRITE 0x01
#define FOLL_TOUCH 0x02
#define FOLL_GET 0x04
#define FOLL_DUMP 0x08
#define FOLL_FORCE 0x10
#define FOLL_NOWAIT 0x20
#define FOLL_MLOCK 0x40
#define FOLL_SPLIT 0x80
#define FOLL_HWPOISON 0x100

typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
			void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
			       unsigned long size, pte_fn_t fn, void *data);

#ifdef CONFIG_PROC_FS
void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
#else
static inline void vm_stat_account(struct mm_struct *mm,
			unsigned long flags, struct file *file, long pages)
{
}
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
extern void kernel_map_pages(struct page *page, int numpages, int enable);
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif
#else
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
#endif
#endif

extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
#ifdef __HAVE_ARCH_GATE_AREA
int in_gate_area_no_mm(unsigned long addr);
int in_gate_area(struct mm_struct *mm, unsigned long addr);
#else
int in_gate_area_no_mm(unsigned long addr);
#define in_gate_area(mm, addr) ({(void)mm; in_gate_area_no_mm(addr);})
#endif

int drop_caches_sysctl_handler(struct ctl_table *, int,
			       void __user *, size_t *, loff_t *);
unsigned long shrink_slab(struct shrink_control *shrink,
			  unsigned long nr_pages_scanned,
			  unsigned long lru_pages);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char * arch_vma_name(struct vm_area_struct *vma);
void print_vma_addr(char *prefix, unsigned long rip);

void sparse_mem_maps_populate_node(struct page **map_map,
				   unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count,
				   int nodeid);

struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
void *vmemmap_alloc_block_buf(unsigned long size, int node);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(struct page *start_page,
			       unsigned long pages, int node);
int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
void vmemmap_populate_print_last(void);

enum mf_flags {
	MF_COUNT_INCREASED = 1 << 0,
	MF_ACTION_REQUIRED = 1 << 1,
};
extern int memory_failure(unsigned long pfn, int trapno, int flags);
extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t mce_bad_pages;
extern int soft_offline_page(struct page *page, int flags);

extern void dump_page(struct page *page);

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
			    unsigned long addr,
			    unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src,
				unsigned long addr, struct vm_area_struct *vma,
				unsigned int pages_per_huge_page);
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;

static inline unsigned int debug_guardpage_minorder(void)
{
	return _debug_guardpage_minorder;
}

static inline bool page_is_guard(struct page *page)
{
	return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */