#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;

#ifndef CONFIG_NEED_MULTIPLE_NODES
extern unsigned long max_mapnr;

static inline void set_max_mapnr(unsigned long limit)
{
	max_mapnr = limit;
}
#else
static inline void set_max_mapnr(unsigned long limit) { }
#endif

extern unsigned long totalram_pages;
extern void * high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)addr, PAGE_SIZE)
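
/*
 * Worked example (illustrative only, assuming PAGE_SIZE == 4096):
 *
 *	PAGE_ALIGN(0x1001)   == 0x2000	(rounded up to the next boundary)
 *	PAGE_ALIGN(0x2000)   == 0x2000	(already aligned, unchanged)
 *	PAGE_ALIGNED(0x2001) == false
 *
 * PAGE_ALIGN() rounds up; PAGE_ALIGNED() only tests the boundary.
 */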

extern struct kmem_cache *vm_area_cachep;

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

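/*
 * vm_flags in vm_area_struct, see mm_types.h.
 */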
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */

#define VM_LOCKED	0x00002000
#define VM_IO           0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
#else
# define VM_SOFTDIRTY	0
#endif

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_METAG)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS	(VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#else
#define VM_STACK_FLAGS	(VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif

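/*
 * Special vmas that are non-mergable, non-mlock()able.
 * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
 */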
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP)

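/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection map..
 */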
extern pgprot_t protection_map[16];

#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
#define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */
#define FAULT_FLAG_MKWRITE	0x04	/* Fault was mkwrite of existing pte */
#define FAULT_FLAG_ALLOW_RETRY	0x08	/* Retry fault if blocking */
#define FAULT_FLAG_RETRY_NOWAIT	0x10	/* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE	0x20	/* The fault task is in SIGKILL killable region */
#define FAULT_FLAG_TRIED	0x40	/* second try */
#define FAULT_FLAG_USER		0x80	/* The fault originated in userspace */

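/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function. That function is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * pgoff should be used in favour of virtual_address, if possible. If pgoff
 * is used, the notion of "linear" is lost, since it could be different from
 * the real offset in a nonlinear (VM_NONLINEAR) mapping.
 */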
struct vm_fault {
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	void __user *virtual_address;	/* Faulting virtual address */

	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_RETRY).
					 */
};

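/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */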
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs that can switch between memory and hardware
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);
#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
	int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
		const nodemask_t *to, unsigned long flags);
#endif
	/* called by sys_remap_file_pages() to populate non-linear mapping */
	int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
			   unsigned long size, pgoff_t pgoff);
};
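
/*
 * Illustrative sketch of a minimal ->fault implementation that hands out
 * a single pre-allocated page (my_page and my_vm_ops are hypothetical
 * names, not part of this header):
 *
 *	static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		if (vmf->pgoff > 0)
 *			return VM_FAULT_SIGBUS;
 *		get_page(my_page);
 *		vmf->page = my_page;
 *		return 0;
 *	}
 *
 *	static const struct vm_operations_struct my_vm_ops = {
 *		.fault = my_fault,
 *	};
 */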

struct mmu_gather;
struct inode;

#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

/* It's valid only if the page is free path or free_list */
static inline void set_freepage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}

/* It's valid only if the page is free path or free_list */
static inline int get_freepage_migratetype(struct page *page)
{
	return page->index;
}

#include <linux/page-flags.h>
#include <linux/huge_mm.h>

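/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */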
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON(atomic_read(&page->_count) == 0);
	return atomic_dec_and_test(&page->_count);
}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 */
static inline int get_page_unless_zero(struct page *page)
{
	return atomic_inc_not_zero(&page->_count);
}

extern int page_is_ram(unsigned long pfn);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

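/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */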
static inline int is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return 0;
#endif
}
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

static inline void compound_lock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(PageSlab(page));
	bit_spin_lock(PG_compound_lock, &page->flags);
#endif
}

static inline void compound_unlock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(PageSlab(page));
	bit_spin_unlock(PG_compound_lock, &page->flags);
#endif
}

static inline unsigned long compound_lock_irqsave(struct page *page)
{
	unsigned long uninitialized_var(flags);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	local_irq_save(flags);
	compound_lock(page);
#endif
	return flags;
}

static inline void compound_unlock_irqrestore(struct page *page,
					      unsigned long flags)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	compound_unlock(page);
	local_irq_restore(flags);
#endif
}

static inline struct page *compound_head(struct page *page)
{
	if (unlikely(PageTail(page)))
		return page->first_page;
	return page;
}

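/*
 * The atomic page->_mapcount, starts from -1: so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */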
static inline void page_mapcount_reset(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

static inline int page_mapcount(struct page *page)
{
	return atomic_read(&(page)->_mapcount) + 1;
}

static inline int page_count(struct page *page)
{
	return atomic_read(&compound_head(page)->_count);
}

static inline void get_huge_page_tail(struct page *page)
{
	/*
	 * __split_huge_page_refcount() cannot run from under us.
	 */
	VM_BUG_ON(page_mapcount(page) < 0);
	VM_BUG_ON(atomic_read(&page->_count) != 0);
	atomic_inc(&page->_mapcount);
}

extern bool __get_page_tail(struct page *page);

static inline void get_page(struct page *page)
{
	if (unlikely(PageTail(page)))
		if (likely(__get_page_tail(page)))
			return;
	/*
	 * Getting a normal page or the head of a compound page
	 * requires to already have an elevated page->_count.
	 */
	VM_BUG_ON(atomic_read(&page->_count) <= 0);
	atomic_inc(&page->_count);
}

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);
	return compound_head(page);
}

/*
 * Setup the page count before being freed into the page allocator for
 * the first time (boot or memory hotplug)
 */
static inline void init_page_count(struct page *page)
{
	atomic_set(&page->_count, 1);
}

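/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 *
 * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
 * -2 so that an underflow of the page_mapcount() won't be mistaken
 * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
 * efficiently by most CPU architectures.
 */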
#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)

static inline int PageBuddy(struct page *page)
{
	return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
}

static inline void __SetPageBuddy(struct page *page)
{
	VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
}

static inline void __ClearPageBuddy(struct page *page)
{
	VM_BUG_ON(!PageBuddy(page));
	atomic_set(&page->_mapcount, -1);
}

void put_page(struct page *page);
void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);
int split_free_page(struct page *page);

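/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a PG_compound page.
 */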
typedef void compound_page_dtor(struct page *);

static inline void set_compound_page_dtor(struct page *page,
						compound_page_dtor *dtor)
{
	page[1].lru.next = (void *)dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	return (compound_page_dtor *)page[1].lru.next;
}

static inline int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return (unsigned long)page[1].lru.prev;
}

static inline void set_compound_order(struct page *page, unsigned long order)
{
	page[1].lru.prev = (void *)order;
}

#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case, do always want
 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}
#endif
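/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */

/*
 * page->flags layout:
 *
 * There are three possibilities for how page->flags get
 * laid out.  The first is for the normal case, without
 * sparsemem.  The second is for sparsemem when there is
 * plenty of space for node and section.  The last is when
 * we have run out of space and have to fall back to an
 * alternate (slower) way of determining the node.
 *
 * No sparsemem or sparsemem vmemmap: |       NODE     | ZONE |          ... | FLAGS |
 * " plus space for last_nid:         |       NODE     | ZONE | LAST_NID ... | FLAGS |
 * classic sparse with space for node:| SECTION | NODE | ZONE |          ... | FLAGS |
 * "      plus space for last_nid:    | SECTION | NODE | ZONE | LAST_NID ... | FLAGS |
 * classic sparse no space for node:  | SECTION |     ZONE    | ... | FLAGS |
 */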
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_NID_PGOFF		(ZONES_PGOFF - LAST_NID_WIDTH)

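/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */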
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_NID_PGSHIFT	(LAST_NID_PGOFF * (LAST_NID_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define LAST_NID_MASK		((1UL << LAST_NID_WIDTH) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif

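/*
 * The identification function is only used by the buddy allocator for
 * determining if two pages could be buddies. We are not really identifying
 * the zone since we could be using the section number id if we do not have
 * node id available in page flags.
 * We only guarantee that it will return the same value for two combinable
 * pages in a zone.
 */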
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
	return zone->node;
#else
	return 0;
#endif
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
#ifdef LAST_NID_NOT_IN_PAGE_FLAGS
static inline int page_nid_xchg_last(struct page *page, int nid)
{
	return xchg(&page->_last_nid, nid);
}

static inline int page_nid_last(struct page *page)
{
	return page->_last_nid;
}
static inline void page_nid_reset_last(struct page *page)
{
	page->_last_nid = -1;
}
#else
static inline int page_nid_last(struct page *page)
{
	return (page->flags >> LAST_NID_PGSHIFT) & LAST_NID_MASK;
}

extern int page_nid_xchg_last(struct page *page, int nid);

static inline void page_nid_reset_last(struct page *page)
{
	int nid = (1 << LAST_NID_SHIFT) - 1;

	page->flags &= ~(LAST_NID_MASK << LAST_NID_PGSHIFT);
	page->flags |= (nid & LAST_NID_MASK) << LAST_NID_PGSHIFT;
}
#endif /* LAST_NID_NOT_IN_PAGE_FLAGS */
#else
static inline int page_nid_xchg_last(struct page *page, int nid)
{
	return page_to_nid(page);
}

static inline int page_nid_last(struct page *page)
{
	return page_to_nid(page);
}

static inline void page_nid_reset_last(struct page *page)
{
}
#endif /* CONFIG_NUMA_BALANCING */

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#ifdef SECTION_IN_PAGE_FLAGS
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(const struct page *page)
{
	return __va(PFN_PHYS(page_to_pfn(page)));
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
#define page_address(page) ((page)->virtual)
#define set_page_address(page, address)			\
	do {						\
		(page)->virtual = (address);		\
	} while(0)
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

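/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
 * and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */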
#define PAGE_MAPPING_ANON	1
#define PAGE_MAPPING_KSM	2
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)

extern struct address_space *page_mapping(struct page *page);

/* Neutral page->mapping pointer to address_space or anon_vma or other */
static inline void *page_rmapping(struct page *page)
{
	return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
}

extern struct address_space *__page_file_mapping(struct page *);

static inline
struct address_space *page_file_mapping(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_mapping(page);

	return page->mapping;
}

static inline int PageAnon(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use ->private
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return page_private(page);
	return page->index;
}

extern pgoff_t __page_file_index(struct page *page);

/*
 * Return the file index of the page. Regular pagecache pages use ->index
 * whereas swapcache pages use swp_offset(->private)
 */
static inline pgoff_t page_file_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_index(page);

	return page->index;
}

/*
 * Return true if this page is mapped into pagetables.
 */
static inline int page_mapped(struct page *page)
{
	return atomic_read(&(page)->_mapcount) >= 0;
}

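/*
 * Different kinds of faults, as returned by handle_mm_fault().
 * Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 */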
#define VM_FAULT_MINOR	0 /* For backwards compat. Remove me quickly. */

#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
#define VM_FAULT_HWPOISON_LARGE 0x0020	/* Hit poisoned large page. Index encoded in upper bits */

#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
#define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */
#define VM_FAULT_FALLBACK 0x0800	/* huge page fault failed, fall back to small */

#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */

#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
			 VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)

/*
 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
 */
extern void pagefault_out_of_memory(void);

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

/*
 * Flags passed to show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */
#define SHOW_MEM_FILTER_PAGE_COUNT	(0x0002u)	/* page type count */

extern void show_free_areas(unsigned int flags);
extern bool skip_free_areas_node(unsigned int flags, int nid);

int shmem_zero_setup(struct vm_area_struct *);

extern int can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct vm_area_struct *nonlinear_vma;	/* Check page->index if set */
	struct address_space *check_mapping;	/* Check page->mapping if set */
	pgoff_t	first_index;			/* Lowest page->index to unmap */
	pgoff_t last_index;			/* Highest page->index to unmap */
};

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
		pte_t pte);

int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long start, unsigned long end);

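/**
 * mm_walk - callbacks for walk_page_range
 * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
 *	       this handler is required to be able to handle
 *	       pmd_trans_huge() pmds.  They may simply choose to
 *	       split_huge_page() instead of handling it explicitly.
 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
 * @pte_hole: if set, called for each hole at all levels
 * @hugetlb_entry: if set, called for each hugetlb entry
 *		   *Caution*: The caller must hold mmap_sem if @hugetlb_entry
 *			      is used.
 *
 * (see walk_page_range for more details)
 */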
struct mm_walk {
	int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pud_entry)(pud_t *pud, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_entry)(pte_t *pte, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_hole)(unsigned long addr, unsigned long next,
			struct mm_walk *walk);
	int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
			     unsigned long addr, unsigned long next,
			     struct mm_walk *walk);
	struct mm_struct *mm;
	void *private;
};

int walk_page_range(unsigned long addr, unsigned long end,
		    struct mm_walk *walk);
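
/*
 * Illustrative sketch: counting present PTEs in a range with
 * walk_page_range(). count_pte is a hypothetical helper; mm, start and
 * end are assumed to be supplied by the caller, and the walk runs under
 * down_read(&mm->mmap_sem):
 *
 *	static int count_pte(pte_t *pte, unsigned long addr,
 *			     unsigned long next, struct mm_walk *walk)
 *	{
 *		unsigned long *count = walk->private;
 *
 *		if (pte_present(*pte))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	unsigned long count = 0;
 *	struct mm_walk walk = {
 *		.pte_entry	= count_pte,
 *		.mm		= mm,
 *		.private	= &count,
 *	};
 *
 *	down_read(&mm->mmap_sem);
 *	walk_page_range(start, end, &walk);
 *	up_read(&mm->mmap_sem);
 */
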
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write);

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);

#ifdef CONFIG_MMU
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long address, unsigned int fault_flags);
#else
static inline int handle_mm_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct task_struct *tsk,
		struct mm_struct *mm, unsigned long address,
		unsigned int fault_flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return -EFAULT;
}
#endif

extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, int write);

long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		      unsigned long start, unsigned long nr_pages,
		      unsigned int foll_flags, struct page **pages,
		      struct vm_area_struct **vmas, int *nonblocking);
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		    unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages,
		    struct vm_area_struct **vmas);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages);
struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
			struct page **pages);
int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);
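
/*
 * Illustrative sketch: pinning one page of the current task's address
 * space for I/O with get_user_pages(), then releasing it (addr is a
 * hypothetical user virtual address; error handling abbreviated):
 *
 *	struct page *page;
 *	long ret;
 *
 *	down_read(&current->mm->mmap_sem);
 *	ret = get_user_pages(current, current->mm, addr, 1,
 *			     1, 0, &page, NULL);
 *	up_read(&current->mm->mmap_sem);
 *	if (ret == 1) {
 *		...use the page, e.g. via kmap(page)...
 *		put_page(page);
 *	}
 */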

extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned int offset,
			      unsigned int length);

int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_writeback(struct page *page);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);

/* Is the vma a continuation of the stack vma above it? */
static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
}

static inline int stack_guard_page_start(struct vm_area_struct *vma,
					     unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSDOWN) &&
		(vma->vm_start == addr) &&
		!vma_growsdown(vma->vm_prev, addr);
}

/* Is the vma a continuation of the stack vma below it? */
static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
}

static inline int stack_guard_page_end(struct vm_area_struct *vma,
					   unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSUP) &&
		(vma->vm_end == addr) &&
		!vma_growsup(vma->vm_next, addr);
}

extern pid_t
vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks);
extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end, pgprot_t newprot,
			      int dirty_accountable, int prot_numa);
extern int mprotect_fixup(struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages);

/*
 * per-process(per-mm_struct) statistics.
 */
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = atomic_long_read(&mm->rss_stat.count[member]);

#ifdef SPLIT_RSS_COUNTING
	/*
	 * counter is updated in asynchronous manner and may go to minus.
	 * But it's never be expected number for users.
	 */
	if (val < 0)
		val = 0;
#endif
	return (unsigned long)val;
}

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	atomic_long_add(value, &mm->rss_stat.count[member]);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_inc(&mm->rss_stat.count[member]);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_dec(&mm->rss_stat.count[member]);
}

static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES);
}

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if ((mm)->hiwater_rss < _rss)
		(mm)->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

	if (*maxrss < hiwater_rss)
		*maxrss = hiwater_rss;
}

#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct mm_struct *mm)
{
}
#endif

int vma_wants_writenotify(struct vm_area_struct *vma);

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			       spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
				    spinlock_t **ptl)
{
	pte_t *ptep;
	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
	return ptep;
}

#ifdef __PAGETABLE_PUD_FOLDED
static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
						unsigned long address)
{
	return 0;
}
#else
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#ifdef __PAGETABLE_PMD_FOLDED
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}
#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
#endif

int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);

/*
 * The following ifdef needed to get the 4level-fixup.h header to work.
 * Remove it when 4level-fixup.h has been removed.
 */
#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
		NULL: pud_offset(pgd, address);
}

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
		NULL: pmd_offset(pud, address);
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */

#if USE_SPLIT_PTLOCKS
/*
 * We tuck a spinlock to guard each pagetable page into its struct page,
 * at page->ptl, with BUILD_BUG_ON to make sure that this will not
 * overflow into the next struct page (as it might with DEBUG_SPINLOCK).
 * When freeing, reset page->mapping so free_pages_check won't complain.
 */
#define __pte_lockptr(page)	&((page)->ptl)
#define pte_lock_init(_page)	do {					\
	spin_lock_init(__pte_lockptr(_page));				\
} while (0)
#define pte_lock_deinit(page)	((page)->mapping = NULL)
#define pte_lockptr(mm, pmd)	({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
#else	/* !USE_SPLIT_PTLOCKS */
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
#define pte_lock_init(page)	do {} while (0)
#define pte_lock_deinit(page)	do {} while (0)
#define pte_lockptr(mm, pmd)	({(void)(pmd); &(mm)->page_table_lock;})
#endif /* USE_SPLIT_PTLOCKS */

static inline void pgtable_page_ctor(struct page *page)
{
	pte_lock_init(page);
	inc_zone_page_state(page, NR_PAGETABLE);
}

static inline void pgtable_page_dtor(struct page *page)
{
	pte_lock_deinit(page);
	dec_zone_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)

#define pte_alloc_map(mm, vma, pmd, address)				\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma,		\
							pmd, address))?	\
	 NULL: pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL,	\
							pmd, address))?	\
		NULL: pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)			\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
		NULL: pte_offset_kernel(pmd, address))

extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, unsigned long * zones_size,
		unsigned long zone_start_pfn, unsigned long *zholes_size);
extern void free_initmem(void);

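/*
 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
 * into the buddy system. The freed pages will be poisoned with pattern
 * "poison" if it's within range [0, UCHAR_MAX].
 * Return pages freed into the buddy system.
 */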
extern unsigned long free_reserved_area(void *start, void *end,
					int poison, char *s);

#ifdef	CONFIG_HIGHMEM
/*
 * Free a highmem page into the buddy system, adjusting totalhigh_pages
 * and totalram_pages.
 */
extern void free_highmem_page(struct page *page);
#endif

extern void adjust_managed_page_count(struct page *page, long count);
extern void mem_init_print_info(const char *str);

/* Free the reserved page into the buddy system, so it gets managed. */
static inline void __free_reserved_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}

static inline void free_reserved_page(struct page *page)
{
	__free_reserved_page(page);
	adjust_managed_page_count(page, 1);
}

static inline void mark_page_reserved(struct page *page)
{
	SetPageReserved(page);
	adjust_managed_page_count(page, -1);
}

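/*
 * Default method to free all the __init memory into the buddy system.
 * The freed pages will be poisoned with pattern "poison" if it's within
 * range [0, UCHAR_MAX].
 * Return pages freed into the buddy system.
 */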
static inline unsigned long free_initmem_default(int poison)
{
	extern char __init_begin[], __init_end[];

	return free_reserved_area(&__init_begin, &__init_end,
				  poison, "unused kernel");
}

static inline unsigned long get_num_physpages(void)
{
	int nid;
	unsigned long phys_pages = 0;

	for_each_online_node(nid)
		phys_pages += node_present_pages(nid);

	return phys_pages;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP

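/*
 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
 * zones, allocate the backing mem_map and account for memory holes in a more
 * architecture independent manner. This is a substitute for creating the
 * zone_sizes[] and zholes_size[] arrays and passing them to
 * free_area_init_node()
 *
 * An architecture is expected to register range of page frames backed by
 * physical memory with memblock_add[_node]() before calling
 * free_area_init_nodes() passing in the PFN each zone ends at. At a basic
 * usage, an architecture is expected to do something like
 *
 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
 * 							 max_highmem_pfn};
 * for_each_valid_physical_page_range()
 * 	memblock_add_node(base, size, nid)
 * free_area_init_nodes(max_zone_pfns);
 *
 * free_bootmem_with_active_regions() calls free_bootmem_node() for each
 * registered physical page range.  Similarly
 * sparse_memory_present_with_active_regions() calls
 * sparse_memory_present() for each range when SPARSEMEM is enabled.
 *
 * See mm/page_alloc.c for more information on each function exposed by
 * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
 */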
extern void free_area_init_nodes(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
						unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
						unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
						unsigned long max_low_pfn);
extern void sparse_memory_present_with_active_regions(int nid);

#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
    !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
static inline int __early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
/* there is a per-arch backend function. */
extern int __meminit __early_pfn_to_nid(unsigned long pfn);
#endif
#endif

extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long,
				unsigned long, enum memmap_context);
extern void setup_per_zone_wmarks(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
extern void show_mem(unsigned int flags);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);

extern __printf(3, 4)
void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);

extern void setup_per_cpu_pageset(void);

extern void zone_pcp_update(struct zone *zone);
extern void zone_pcp_reset(struct zone *zone);

/* page_alloc.c */
extern int min_free_kbytes;

/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);

/* interval_tree.c */
void vma_interval_tree_insert(struct vm_area_struct *node,
			      struct rb_root *root);
void vma_interval_tree_insert_after(struct vm_area_struct *node,
				    struct vm_area_struct *prev,
				    struct rb_root *root);
void vma_interval_tree_remove(struct vm_area_struct *node,
			      struct rb_root *root);
struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root,
				unsigned long start, unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
				unsigned long start, unsigned long last);

#define vma_interval_tree_foreach(vma, root, start, last)		\
	for (vma = vma_interval_tree_iter_first(root, start, last);	\
	     vma; vma = vma_interval_tree_iter_next(vma, start, last))

static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
					struct list_head *list)
{
	list_add_tail(&vma->shared.nonlinear, list);
}

void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
				   struct rb_root *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
				   struct rb_root *root);
struct anon_vma_chain *anon_vma_interval_tree_iter_first(
	struct rb_root *root, unsigned long start, unsigned long last);
struct anon_vma_chain *anon_vma_interval_tree_iter_next(
	struct anon_vma_chain *node, unsigned long start, unsigned long last);
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
#endif

#define anon_vma_interval_tree_foreach(avc, root, start, last)		 \
	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))

/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
extern struct vm_area_struct *vma_merge(struct mm_struct *,
	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
	struct mempolicy *);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int split_vma(struct mm_struct *,
	struct vm_area_struct *, unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
	struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);

extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);

extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);

extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
extern int install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags, struct page **pages);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long mmap_region(struct file *file, unsigned long addr,
	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	unsigned long pgoff, unsigned long *populate);
extern int do_munmap(struct mm_struct *, unsigned long, size_t);

#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
			 int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
{
	/* Ignore errors */
	(void) __mm_populate(addr, len, 1);
}
#else
static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif

/* These take the mm semaphore themselves */
extern unsigned long vm_brk(unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long vm_mmap(struct file *, unsigned long,
	unsigned long, unsigned long,
	unsigned long, unsigned long);

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
};

extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);

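/*
 * Search for an unmapped address range.
 *
 * We are looking for a range that:
 * - does not intersect with any VMA;
 * - is contained within the [low_limit, high_limit) interval;
 * - is at least the desired size.
 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
 */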
static inline unsigned long
vm_unmapped_area(struct vm_unmapped_area_info *info)
{
	if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
		return unmapped_area(info);
	else
		return unmapped_area_topdown(info);
}

/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
				       loff_t lstart, loff_t lend);

/* generic vm_area_ops exported for stackable file systems */
extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);

/* mm/page-writeback.c */
int write_one_page(struct page *page, int wait);
void task_dirty_inc(struct task_struct *tsk);

/* readahead.c */
#define VM_MAX_READAHEAD	128	/* kbytes */
#define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */

int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read);

void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra,
			       struct file *filp,
			       pgoff_t offset,
			       unsigned long size);

void page_cache_async_readahead(struct address_space *mapping,
				struct file_ra_state *ra,
				struct file *filp,
				struct page *pg,
				pgoff_t offset,
				unsigned long size);

unsigned long max_sane_readahead(unsigned long nr);
unsigned long ra_submit(struct file_ra_state *ra,
			struct address_space *mapping,
			struct file *filp);

/* Do stack extension */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);

/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
extern int expand_downwards(struct vm_area_struct *vma,
		unsigned long address);
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
  #define expand_upwards(vma, address) do { } while (0)
#endif

/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
					     struct vm_area_struct **pprev);

/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none.  Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma = find_vma(mm,start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

/* Look up the first VMA which exactly match the interval vm_start ... vm_end */
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
				unsigned long vm_start, unsigned long vm_end)
{
	struct vm_area_struct *vma = find_vma(mm, vm_start);

	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
		vma = NULL;

	return vma;
}

#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
#else
static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(0);
}
#endif

#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
#endif

struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
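
/*
 * Illustrative sketch: a driver ->mmap() method backing the whole VMA
 * with physical memory via remap_pfn_range() (my_mmap and phys are
 * hypothetical names; phys is a page-aligned physical base address):
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long pfn = phys >> PAGE_SHIFT;
 *
 *		return remap_pfn_range(vma, vma->vm_start, pfn,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 */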

struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int foll_flags,
			      unsigned int *page_mask);

static inline struct page *follow_page(struct vm_area_struct *vma,
		unsigned long address, unsigned int foll_flags)
{
	unsigned int unused_page_mask;
	return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
}

#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_TOUCH	0x02	/* mark page accessed */
#define FOLL_GET	0x04	/* do get_page on page */
#define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
#define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
#define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
				 * and return without waiting upon it */
#define FOLL_MLOCK	0x40	/* mark page as mlocked */
#define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
#define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
#define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
#define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */

typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
			void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
			       unsigned long size, pte_fn_t fn, void *data);

#ifdef CONFIG_PROC_FS
void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
#else
static inline void vm_stat_account(struct mm_struct *mm,
			unsigned long flags, struct file *file, long pages)
{
	mm->total_vm += pages;
}
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_DEBUG_PAGEALLOC
extern void kernel_map_pages(struct page *page, int numpages, int enable);
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif /* CONFIG_HIBERNATION */
#else
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
#endif /* CONFIG_HIBERNATION */
#endif

extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
#ifdef	__HAVE_ARCH_GATE_AREA
int in_gate_area_no_mm(unsigned long addr);
int in_gate_area(struct mm_struct *mm, unsigned long addr);
#else
int in_gate_area_no_mm(unsigned long addr);
#define in_gate_area(mm, addr) ({(void)mm; in_gate_area_no_mm(addr);})
#endif	/* __HAVE_ARCH_GATE_AREA */

#ifdef CONFIG_SYSCTL
extern int sysctl_drop_caches;
int drop_caches_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

unsigned long shrink_slab(struct shrink_control *shrink,
			  unsigned long nr_pages_scanned,
			  unsigned long lru_pages);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char * arch_vma_name(struct vm_area_struct *vma);
void print_vma_addr(char *prefix, unsigned long rip);

void sparse_mem_maps_populate_node(struct page **map_map,
				   unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count,
				   int nodeid);

struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
void *vmemmap_alloc_block_buf(unsigned long size, int node);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
			       int node);
int vmemmap_populate(unsigned long start, unsigned long end, int node);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end);
#endif
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
				  unsigned long size);

enum mf_flags {
	MF_COUNT_INCREASED = 1 << 0,
	MF_ACTION_REQUIRED = 1 << 1,
	MF_MUST_KILL = 1 << 2,
	MF_SOFT_OFFLINE = 1 << 3,
};
extern int memory_failure(unsigned long pfn, int trapno, int flags);
extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t num_poisoned_pages;
extern int soft_offline_page(struct page *page, int flags);

extern void dump_page(struct page *page);

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
			    unsigned long addr,
			    unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src,
				unsigned long addr, struct vm_area_struct *vma,
				unsigned int pages_per_huge_page);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;

static inline unsigned int debug_guardpage_minorder(void)
{
	return _debug_guardpage_minorder;
}

static inline bool page_is_guard(struct page *page)
{
	return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif /* CONFIG_DEBUG_PAGEALLOC */

#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else
static inline void setup_nr_node_ids(void) {}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */