#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;

#ifndef CONFIG_DISCONTIGMEM
extern unsigned long max_mapnr;
#endif

extern unsigned long num_physpages;
extern unsigned long totalram_pages;
extern void * high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
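
/*
 * nth_page() goes through the pfn so it stays correct even where the
 * mem_map is not virtually contiguous (e.g. classic SPARSEMEM), where
 * plain pointer arithmetic on struct page would not be safe.
 * PAGE_ALIGN() rounds an address up to the next page boundary; a
 * minimal sketch, assuming PAGE_SIZE == 4096:
 *
 *	PAGE_ALIGN(0x1001) == 0x2000
 *	PAGE_ALIGN(0x2000) == 0x2000
 */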

extern struct kmem_cache *vm_area_cachep;

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

#define VM_NONE		0x00000000

#define VM_READ		0x00000001
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

#define VM_MAYREAD	0x00000010
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100
#define VM_PFNMAP	0x00000400
#define VM_DENYWRITE	0x00000800

#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000

#define VM_SEQ_READ	0x00008000
#define VM_RAND_READ	0x00010000

#define VM_DONTCOPY	0x00020000
#define VM_DONTEXPAND	0x00040000
#define VM_ACCOUNT	0x00100000
#define VM_NORESERVE	0x00200000
#define VM_HUGETLB	0x00400000
#define VM_NONLINEAR	0x00800000
#define VM_ARCH_1	0x01000000
#define VM_DONTDUMP	0x04000000

#define VM_MIXEDMAP	0x10000000
#define VM_HUGEPAGE	0x20000000
#define VM_NOHUGEPAGE	0x40000000
#define VM_MERGEABLE	0x80000000

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_METAG)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#ifndef VM_STACK_DEFAULT_FLAGS
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS	(VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#else
#define VM_STACK_FLAGS	(VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif

#define VM_READHINTMASK			(VM_SEQ_READ | VM_RAND_READ)
#define VM_ClearReadHint(v)		(v)->vm_flags &= ~VM_READHINTMASK
#define VM_NormalReadHint(v)		(!((v)->vm_flags & VM_READHINTMASK))
#define VM_SequentialReadHint(v)	((v)->vm_flags & VM_SEQ_READ)
#define VM_RandomReadHint(v)		((v)->vm_flags & VM_RAND_READ)

#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP)

extern pgprot_t protection_map[16];

#define FAULT_FLAG_WRITE	0x01
#define FAULT_FLAG_NONLINEAR	0x02
#define FAULT_FLAG_MKWRITE	0x04
#define FAULT_FLAG_ALLOW_RETRY	0x08
#define FAULT_FLAG_RETRY_NOWAIT	0x10
#define FAULT_FLAG_KILLABLE	0x20
#define FAULT_FLAG_TRIED	0x40

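/*
 * vm_fault is filled in by the pagefault handler and passed to a VMA's
 * ->fault callback: flags are FAULT_FLAG_xxx, pgoff is the logical page
 * offset based on the vma, and virtual_address is the faulting address.
 * The handler stores its resolved page in ->page, or installs the pte
 * itself and returns VM_FAULT_NOPAGE.
 */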
struct vm_fault {
	unsigned int flags;
	pgoff_t pgoff;
	void __user *virtual_address;

	struct page *page;
};

struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);

	int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);

	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);
#ifdef CONFIG_NUMA
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
	int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
		       const nodemask_t *to, unsigned long flags);
#endif

	int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
			   unsigned long size, pgoff_t pgoff);
};
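
/*
 * A minimal sketch of wiring a fault handler through vm_ops from a
 * driver's mmap implementation (my_fault and my_pages are illustrative
 * names, not kernel API). The handler takes a reference on the page;
 * the core MM consumes that reference when it maps the page:
 *
 *	static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		struct page *page = my_pages[vmf->pgoff];
 *
 *		if (!page)
 *			return VM_FAULT_SIGBUS;
 *		get_page(page);
 *		vmf->page = page;
 *		return 0;
 *	}
 *
 *	static const struct vm_operations_struct my_vm_ops = {
 *		.fault = my_fault,
 *	};
 */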

struct mmu_gather;
struct inode;

#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

static inline void set_freepage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}

static inline int get_freepage_migratetype(struct page *page)
{
	return page->index;
}

#include <linux/page-flags.h>
#include <linux/huge_mm.h>

static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON(atomic_read(&page->_count) == 0);
	return atomic_dec_and_test(&page->_count);
}

static inline int get_page_unless_zero(struct page *page)
{
	return atomic_inc_not_zero(&page->_count);
}
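
/*
 * Together these implement the speculative-reference protocol:
 * put_page_testzero() drops a reference and reports whether it was the
 * last one (the caller then frees the page), while
 * get_page_unless_zero() takes a reference only if the count is not
 * already zero. Lockless lookups use it like (sketch):
 *
 *	if (!get_page_unless_zero(page))
 *		goto repeat;	(page was already being freed)
 */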

extern int page_is_ram(unsigned long pfn);

struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

static inline int is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return 0;
#endif
}
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

static inline void compound_lock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(PageSlab(page));
	bit_spin_lock(PG_compound_lock, &page->flags);
#endif
}

static inline void compound_unlock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(PageSlab(page));
	bit_spin_unlock(PG_compound_lock, &page->flags);
#endif
}

static inline unsigned long compound_lock_irqsave(struct page *page)
{
	unsigned long uninitialized_var(flags);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	local_irq_save(flags);
	compound_lock(page);
#endif
	return flags;
}

static inline void compound_unlock_irqrestore(struct page *page,
					      unsigned long flags)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	compound_unlock(page);
	local_irq_restore(flags);
#endif
}

static inline struct page *compound_head(struct page *page)
{
	if (unlikely(PageTail(page)))
		return page->first_page;
	return page;
}
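
/*
 * Tail pages of a compound page carry a pointer back to the head page
 * in ->first_page, so compound_head() is a cheap way to normalize
 * either kind of page to its head. compound_lock() above is the bit
 * spinlock that serializes against THP splitting when transparent
 * hugepages are enabled; with them disabled it compiles away.
 */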

static inline void page_mapcount_reset(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

static inline int page_mapcount(struct page *page)
{
	return atomic_read(&(page)->_mapcount) + 1;
}

static inline int page_count(struct page *page)
{
	return atomic_read(&compound_head(page)->_count);
}

static inline void get_huge_page_tail(struct page *page)
{
	VM_BUG_ON(page_mapcount(page) < 0);
	VM_BUG_ON(atomic_read(&page->_count) != 0);
	atomic_inc(&page->_mapcount);
}

extern bool __get_page_tail(struct page *page);

static inline void get_page(struct page *page)
{
	if (unlikely(PageTail(page)))
		if (likely(__get_page_tail(page)))
			return;

	VM_BUG_ON(atomic_read(&page->_count) <= 0);
	atomic_inc(&page->_count);
}

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);
	return compound_head(page);
}

static inline void init_page_count(struct page *page)
{
	atomic_set(&page->_count, 1);
}

#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)

static inline int PageBuddy(struct page *page)
{
	return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
}

static inline void __SetPageBuddy(struct page *page)
{
	VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
}

static inline void __ClearPageBuddy(struct page *page)
{
	VM_BUG_ON(!PageBuddy(page));
	atomic_set(&page->_mapcount, -1);
}
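
/*
 * Pages sitting in the buddy allocator are mapped by nobody, so their
 * _mapcount (which is -1 for an unmapped page) can be overloaded:
 * __SetPageBuddy() stores PAGE_BUDDY_MAPCOUNT_VALUE there to mark a
 * free page without spending a real page flag, and __ClearPageBuddy()
 * restores -1 when the page is handed out.
 */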

void put_page(struct page *page);
void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);
int split_free_page(struct page *page);

typedef void compound_page_dtor(struct page *);

static inline void set_compound_page_dtor(struct page *page,
						compound_page_dtor *dtor)
{
	page[1].lru.next = (void *)dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	return (compound_page_dtor *)page[1].lru.next;
}

static inline int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return (unsigned long)page[1].lru.prev;
}

static inline int compound_trans_order(struct page *page)
{
	int order;
	unsigned long flags;

	if (!PageHead(page))
		return 0;

	flags = compound_lock_irqsave(page);
	order = compound_order(page);
	compound_unlock_irqrestore(page, flags);
	return order;
}

static inline void set_compound_order(struct page *page, unsigned long order)
{
	page[1].lru.prev = (void *)order;
}

#ifdef CONFIG_MMU
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}
#endif
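
/*
 * maybe_mkwrite() is used when servicing a write fault: the new pte is
 * made writable only if the VMA actually permits writes. A typical
 * call pattern from a fault path looks like (sketch):
 *
 *	entry = mk_pte(page, vma->vm_page_prot);
 *	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 */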
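/*
 * The upper end of page->flags multiplexes the page's zone, node and
 * (with classic sparsemem) section number below the real page flags,
 * roughly (a sketch; bracketed fields only exist in some configs):
 *
 *	| [SECTION] | [NODE] | ZONE | [LAST_NID] | ... | FLAGS |
 *
 * The *_PGOFF, *_PGSHIFT and *_MASK definitions below carve out those
 * bit ranges; when a field is not kept in page->flags its WIDTH is 0,
 * its mask is empty, and the corresponding accessors store nothing.
 */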
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_NID_PGOFF		(ZONES_PGOFF - LAST_NID_WIDTH)

#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_NID_PGSHIFT	(LAST_NID_PGOFF * (LAST_NID_WIDTH != 0))

#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define LAST_NID_MASK		((1UL << LAST_NID_WIDTH) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif

static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
	return zone->node;
#else
	return 0;
#endif
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
#ifdef LAST_NID_NOT_IN_PAGE_FLAGS
static inline int page_nid_xchg_last(struct page *page, int nid)
{
	return xchg(&page->_last_nid, nid);
}

static inline int page_nid_last(struct page *page)
{
	return page->_last_nid;
}
static inline void page_nid_reset_last(struct page *page)
{
	page->_last_nid = -1;
}
#else
static inline int page_nid_last(struct page *page)
{
	return (page->flags >> LAST_NID_PGSHIFT) & LAST_NID_MASK;
}

extern int page_nid_xchg_last(struct page *page, int nid);

static inline void page_nid_reset_last(struct page *page)
{
	int nid = (1 << LAST_NID_SHIFT) - 1;

	page->flags &= ~(LAST_NID_MASK << LAST_NID_PGSHIFT);
	page->flags |= (nid & LAST_NID_MASK) << LAST_NID_PGSHIFT;
}
#endif
#else
static inline int page_nid_xchg_last(struct page *page, int nid)
{
	return page_to_nid(page);
}

static inline int page_nid_last(struct page *page)
{
	return page_to_nid(page);
}

static inline void page_nid_reset_last(struct page *page)
{
}
#endif

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#ifdef SECTION_IN_PAGE_FLAGS
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}

#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(const struct page *page)
{
	return __va(PFN_PHYS(page_to_pfn(page)));
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
#define page_address(page) ((page)->virtual)
#define set_page_address(page, address)		\
	do {					\
		(page)->virtual = (address);	\
	} while(0)
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif
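
/*
 * Three configurations of page_address(): with WANT_PAGE_VIRTUAL the
 * kernel virtual address is cached directly in struct page; with
 * HASHED_PAGE_VIRTUAL (highmem without that field) it is kept in a
 * hash table; otherwise every page is in the direct map and
 * lowmem_page_address() just computes the address from the pfn.
 */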

#define PAGE_MAPPING_ANON	1
#define PAGE_MAPPING_KSM	2
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)

extern struct address_space *page_mapping(struct page *page);

static inline void *page_rmapping(struct page *page)
{
	return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
}

extern struct address_space *__page_file_mapping(struct page *);

static inline
struct address_space *page_file_mapping(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_mapping(page);

	return page->mapping;
}

static inline int PageAnon(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return page_private(page);
	return page->index;
}

extern pgoff_t __page_file_index(struct page *page);

static inline pgoff_t page_file_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_index(page);

	return page->index;
}

static inline int page_mapped(struct page *page)
{
	return atomic_read(&(page)->_mapcount) >= 0;
}

#define VM_FAULT_MINOR	0

#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_WRITE	0x0008
#define VM_FAULT_HWPOISON 0x0010
#define VM_FAULT_HWPOISON_LARGE 0x0020

#define VM_FAULT_NOPAGE	0x0100
#define VM_FAULT_LOCKED	0x0200
#define VM_FAULT_RETRY	0x0400

#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000

#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
			 VM_FAULT_HWPOISON_LARGE)

#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
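
/*
 * For a hardware-poisoned huge page, VM_FAULT_SET_HINDEX() encodes the
 * hstate index of the page's size in bits 12-15 of the fault code
 * (the range covered by VM_FAULT_HWPOISON_LARGE_MASK), so the handler
 * can recover the affected huge page size with VM_FAULT_GET_HINDEX().
 */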

extern void pagefault_out_of_memory(void);

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

#define SHOW_MEM_FILTER_NODES		(0x0001u)
#define SHOW_MEM_FILTER_PAGE_COUNT	(0x0002u)

extern void show_free_areas(unsigned int flags);
extern bool skip_free_areas_node(unsigned int flags, int nid);

int shmem_zero_setup(struct vm_area_struct *);

extern int can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

struct zap_details {
	struct vm_area_struct *nonlinear_vma;
	struct address_space *check_mapping;
	pgoff_t first_index;
	pgoff_t last_index;
};

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
		pte_t pte);

int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long start, unsigned long end);

struct mm_walk {
	int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pud_entry)(pud_t *pud, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_entry)(pte_t *pte, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_hole)(unsigned long addr, unsigned long next,
			struct mm_walk *walk);
	int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
			     unsigned long addr, unsigned long next,
			     struct mm_walk *walk);
	struct mm_struct *mm;
	void *private;
};

int walk_page_range(unsigned long addr, unsigned long end,
		    struct mm_walk *walk);
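
/*
 * A minimal sketch of a page-table walk that counts present ptes in a
 * range (count_pte and nr_present are illustrative names; the caller
 * must hold mm->mmap_sem, and a nonzero callback return aborts the
 * walk):
 *
 *	static int count_pte(pte_t *pte, unsigned long addr,
 *			     unsigned long next, struct mm_walk *walk)
 *	{
 *		unsigned long *nr_present = walk->private;
 *
 *		if (pte_present(*pte))
 *			(*nr_present)++;
 *		return 0;
 *	}
 *
 *	unsigned long nr_present = 0;
 *	struct mm_walk count_walk = {
 *		.pte_entry	= count_pte,
 *		.mm		= mm,
 *		.private	= &nr_present,
 *	};
 *	walk_page_range(start, end, &count_walk);
 */
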
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write);

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);

#ifdef CONFIG_MMU
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long address, unsigned int fault_flags);
#else
static inline int handle_mm_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct task_struct *tsk,
		struct mm_struct *mm, unsigned long address,
		unsigned int fault_flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return -EFAULT;
}
#endif

extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, int write);

long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		      unsigned long start, unsigned long nr_pages,
		      unsigned int foll_flags, struct page **pages,
		      struct vm_area_struct **vmas, int *nonblocking);
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		    unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages,
		    struct vm_area_struct **vmas);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages);
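
/*
 * Sketch of pinning a user buffer, e.g. ahead of DMA (uaddr, nr_pages
 * and pages[] are illustrative; write = 1 asks for writable pages, and
 * each pinned page must eventually be released with put_page()):
 *
 *	npinned = get_user_pages_fast(uaddr, nr_pages, 1, pages);
 *	if (npinned < nr_pages)
 *		... release what was pinned and bail out ...
 *	for (i = 0; i < npinned; i++)
 *		put_page(pages[i]);
 */
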
struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
			struct page **pages);
int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);

extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned long offset);

int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_writeback(struct page *page);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);

static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
}

static inline int stack_guard_page_start(struct vm_area_struct *vma,
					 unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSDOWN) &&
		(vma->vm_start == addr) &&
		!vma_growsdown(vma->vm_prev, addr);
}

static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
}

static inline int stack_guard_page_end(struct vm_area_struct *vma,
				       unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSUP) &&
		(vma->vm_end == addr) &&
		!vma_growsup(vma->vm_next, addr);
}

extern pid_t
vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks);
extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end, pgprot_t newprot,
			      int dirty_accountable, int prot_numa);
extern int mprotect_fixup(struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages);

static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = atomic_long_read(&mm->rss_stat.count[member]);

#ifdef SPLIT_RSS_COUNTING
	if (val < 0)
		val = 0;
#endif
	return (unsigned long)val;
}

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	atomic_long_add(value, &mm->rss_stat.count[member]);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_inc(&mm->rss_stat.count[member]);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_dec(&mm->rss_stat.count[member]);
}

static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES);
}

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if ((mm)->hiwater_rss < _rss)
		(mm)->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

	if (*maxrss < hiwater_rss)
		*maxrss = hiwater_rss;
}

#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct mm_struct *mm)
{
}
#endif

int vma_wants_writenotify(struct vm_area_struct *vma);

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			       spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
				    spinlock_t **ptl)
{
	pte_t *ptep;
	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
	return ptep;
}

#ifdef __PAGETABLE_PUD_FOLDED
static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
						unsigned long address)
{
	return 0;
}
#else
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#ifdef __PAGETABLE_PMD_FOLDED
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}
#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
#endif

int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);

#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
		NULL: pud_offset(pgd, address);
}

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
		NULL: pmd_offset(pud, address);
}
#endif

#if USE_SPLIT_PTLOCKS
#define __pte_lockptr(page)	&((page)->ptl)
#define pte_lock_init(_page)	do {					\
	spin_lock_init(__pte_lockptr(_page));				\
} while (0)
#define pte_lock_deinit(page)	((page)->mapping = NULL)
#define pte_lockptr(mm, pmd)	({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
#else
#define pte_lock_init(page)	do {} while (0)
#define pte_lock_deinit(page)	do {} while (0)
#define pte_lockptr(mm, pmd)	({(void)(pmd); &(mm)->page_table_lock;})
#endif

static inline void pgtable_page_ctor(struct page *page)
{
	pte_lock_init(page);
	inc_zone_page_state(page, NR_PAGETABLE);
}

static inline void pgtable_page_dtor(struct page *page)
{
	pte_lock_deinit(page);
	dec_zone_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)
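
/*
 * The usual pattern: map and lock the pte, examine or modify it, then
 * unlock and unmap in one step (sketch):
 *
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	if (pte_present(*pte))
 *		... use the pte ...
 *	pte_unmap_unlock(pte, ptl);
 */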

#define pte_alloc_map(mm, vma, pmd, address)				\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma,		\
							pmd, address))?	\
	 NULL: pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)		\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL,	\
							pmd, address))?	\
		NULL: pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)			\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
		NULL: pte_offset_kernel(pmd, address))

extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, unsigned long * zones_size,
		unsigned long zone_start_pfn, unsigned long *zholes_size);
extern void free_initmem(void);

extern unsigned long free_reserved_area(unsigned long start, unsigned long end,
					int poison, char *s);
#ifdef CONFIG_HIGHMEM
extern void free_highmem_page(struct page *page);
#endif

static inline void adjust_managed_page_count(struct page *page, long count)
{
	totalram_pages += count;
}

static inline void __free_reserved_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}

static inline void free_reserved_page(struct page *page)
{
	__free_reserved_page(page);
	adjust_managed_page_count(page, 1);
}

static inline void mark_page_reserved(struct page *page)
{
	SetPageReserved(page);
	adjust_managed_page_count(page, -1);
}

static inline unsigned long free_initmem_default(int poison)
{
	extern char __init_begin[], __init_end[];

	return free_reserved_area(PAGE_ALIGN((unsigned long)&__init_begin) ,
				  ((unsigned long)&__init_end) & PAGE_MASK,
				  poison, "unused kernel");
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
extern void free_area_init_nodes(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
						unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
						unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
						unsigned long max_low_pfn);
extern void sparse_memory_present_with_active_regions(int nid);

#endif

#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
    !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
static inline int __early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#else
extern int __meminit early_pfn_to_nid(unsigned long pfn);
#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
extern int __meminit __early_pfn_to_nid(unsigned long pfn);
#endif
#endif

extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long,
				unsigned long, enum memmap_context);
extern void setup_per_zone_wmarks(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
extern void show_mem(unsigned int flags);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);

extern __printf(3, 4)
void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);

extern void setup_per_cpu_pageset(void);

extern void zone_pcp_update(struct zone *zone);
extern void zone_pcp_reset(struct zone *zone);

extern int min_free_kbytes;

extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);

void vma_interval_tree_insert(struct vm_area_struct *node,
			      struct rb_root *root);
void vma_interval_tree_insert_after(struct vm_area_struct *node,
				    struct vm_area_struct *prev,
				    struct rb_root *root);
void vma_interval_tree_remove(struct vm_area_struct *node,
			      struct rb_root *root);
struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root,
				unsigned long start, unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
				unsigned long start, unsigned long last);

#define vma_interval_tree_foreach(vma, root, start, last)		\
	for (vma = vma_interval_tree_iter_first(root, start, last);	\
	     vma; vma = vma_interval_tree_iter_next(vma, start, last))
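
/*
 * Sketch: visit every VMA that maps a given page range of a file; the
 * caller holds mapping->i_mmap_mutex, and first/last are page offsets
 * into the file:
 *
 *	struct vm_area_struct *vma;
 *
 *	vma_interval_tree_foreach(vma, &mapping->i_mmap, first, last) {
 *		... vma overlaps [first, last] ...
 *	}
 */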

static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
					struct list_head *list)
{
	list_add_tail(&vma->shared.nonlinear, list);
}

void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
				   struct rb_root *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
				   struct rb_root *root);
struct anon_vma_chain *anon_vma_interval_tree_iter_first(
	struct rb_root *root, unsigned long start, unsigned long last);
struct anon_vma_chain *anon_vma_interval_tree_iter_next(
	struct anon_vma_chain *node, unsigned long start, unsigned long last);
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
#endif

#define anon_vma_interval_tree_foreach(avc, root, start, last)		 \
	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))

extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
extern struct vm_area_struct *vma_merge(struct mm_struct *,
	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
	struct mempolicy *);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int split_vma(struct mm_struct *,
	struct vm_area_struct *, unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
	struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);

extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);

extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);

extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
extern int install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags, struct page **pages);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long mmap_region(struct file *file, unsigned long addr,
	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	unsigned long pgoff, unsigned long *populate);
extern int do_munmap(struct mm_struct *, unsigned long, size_t);

#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
			 int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
{
	/* Ignore errors */
	(void) __mm_populate(addr, len, 1);
}
#else
static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif

extern unsigned long vm_brk(unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long vm_mmap(struct file *, unsigned long,
	unsigned long, unsigned long,
	unsigned long, unsigned long);

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
};

extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
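
/*
 * Search for an unmapped address range.
 *
 * We are looking for a range that:
 * - does not intersect with any VMA;
 * - is contained within the [low_limit, high_limit) interval;
 * - is at least the desired size;
 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask).
 *
 * With VM_UNMAPPED_AREA_TOPDOWN set, the highest such address is
 * returned; otherwise the lowest.
 */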
static inline unsigned long
vm_unmapped_area(struct vm_unmapped_area_info *info)
{
	if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
		return unmapped_area(info);
	else
		return unmapped_area_topdown(info);
}

extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
				       loff_t lstart, loff_t lend);

extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);

int write_one_page(struct page *page, int wait);
void task_dirty_inc(struct task_struct *tsk);

#define VM_MAX_READAHEAD	128	/* kbytes */
#define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */

int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read);

void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra,
			       struct file *filp,
			       pgoff_t offset,
			       unsigned long size);

void page_cache_async_readahead(struct address_space *mapping,
				struct file_ra_state *ra,
				struct file *filp,
				struct page *pg,
				pgoff_t offset,
				unsigned long size);

unsigned long max_sane_readahead(unsigned long nr);
unsigned long ra_submit(struct file_ra_state *ra,
			struct address_space *mapping,
			struct file *filp);

extern int expand_stack(struct vm_area_struct *vma, unsigned long address);

extern int expand_downwards(struct vm_area_struct *vma,
		unsigned long address);
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
  #define expand_upwards(vma, address) do { } while (0)
#endif

extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
					     struct vm_area_struct **pprev);

static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma = find_vma(mm,start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}

static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
				unsigned long vm_start, unsigned long vm_end)
{
	struct vm_area_struct *vma = find_vma(mm, vm_start);

	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
		vma = NULL;

	return vma;
}

#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
#else
static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(0);
}
#endif

#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
#endif

struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);

struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int foll_flags,
			      unsigned int *page_mask);

static inline struct page *follow_page(struct vm_area_struct *vma,
		unsigned long address, unsigned int foll_flags)
{
	unsigned int unused_page_mask;
	return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
}

#define FOLL_WRITE	0x01
#define FOLL_TOUCH	0x02
#define FOLL_GET	0x04
#define FOLL_DUMP	0x08
#define FOLL_FORCE	0x10
#define FOLL_NOWAIT	0x20

#define FOLL_MLOCK	0x40
#define FOLL_SPLIT	0x80
#define FOLL_HWPOISON	0x100
#define FOLL_NUMA	0x200
#define FOLL_MIGRATION	0x400

typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
			void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
			       unsigned long size, pte_fn_t fn, void *data);
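
/*
 * apply_to_page_range() fills in any missing page tables in the range
 * and calls fn on each pte. A sketch with an illustrative callback
 * that write-protects every pte (set_ro is not kernel API):
 *
 *	static int set_ro(pte_t *pte, pgtable_t token, unsigned long addr,
 *			  void *data)
 *	{
 *		struct mm_struct *mm = data;
 *
 *		set_pte_at(mm, addr, pte, pte_wrprotect(*pte));
 *		return 0;
 *	}
 *
 *	apply_to_page_range(mm, start, size, set_ro, mm);
 */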

#ifdef CONFIG_PROC_FS
void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
#else
static inline void vm_stat_account(struct mm_struct *mm,
			unsigned long flags, struct file *file, long pages)
{
	mm->total_vm += pages;
}
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
extern void kernel_map_pages(struct page *page, int numpages, int enable);
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif
#else
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
#endif
#endif

extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
#ifdef __HAVE_ARCH_GATE_AREA
int in_gate_area_no_mm(unsigned long addr);
int in_gate_area(struct mm_struct *mm, unsigned long addr);
#else
int in_gate_area_no_mm(unsigned long addr);
#define in_gate_area(mm, addr) ({(void)mm; in_gate_area_no_mm(addr);})
#endif

#ifdef CONFIG_SYSCTL
extern int sysctl_drop_caches;
int drop_caches_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

unsigned long shrink_slab(struct shrink_control *shrink,
			  unsigned long nr_pages_scanned,
			  unsigned long lru_pages);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char * arch_vma_name(struct vm_area_struct *vma);
void print_vma_addr(char *prefix, unsigned long rip);

void sparse_mem_maps_populate_node(struct page **map_map,
				   unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count,
				   int nodeid);

struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
void *vmemmap_alloc_block_buf(unsigned long size, int node);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
			       int node);
int vmemmap_populate(unsigned long start, unsigned long end, int node);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end);
#endif
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
				  unsigned long size);

enum mf_flags {
	MF_COUNT_INCREASED = 1 << 0,
	MF_ACTION_REQUIRED = 1 << 1,
	MF_MUST_KILL = 1 << 2,
};
extern int memory_failure(unsigned long pfn, int trapno, int flags);
extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t num_poisoned_pages;
extern int soft_offline_page(struct page *page, int flags);

extern void dump_page(struct page *page);

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
			    unsigned long addr,
			    unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src,
				unsigned long addr, struct vm_area_struct *vma,
				unsigned int pages_per_huge_page);
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;

static inline unsigned int debug_guardpage_minorder(void)
{
	return _debug_guardpage_minorder;
}

static inline bool page_is_guard(struct page *page)
{
	return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif

#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else
static inline void setup_nr_node_ids(void) {}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */