1#ifndef _LINUX_MM_H
2#define _LINUX_MM_H
3
4#include <linux/errno.h>
5
6#ifdef __KERNEL__
7
8#include <linux/gfp.h>
9#include <linux/bug.h>
10#include <linux/list.h>
11#include <linux/mmzone.h>
12#include <linux/rbtree.h>
13#include <linux/atomic.h>
14#include <linux/debug_locks.h>
15#include <linux/mm_types.h>
16#include <linux/range.h>
17#include <linux/pfn.h>
18#include <linux/bit_spinlock.h>
19#include <linux/shrinker.h>
20
21struct mempolicy;
22struct anon_vma;
23struct anon_vma_chain;
24struct file_ra_state;
25struct user_struct;
26struct writeback_control;
27
28#ifndef CONFIG_DISCONTIGMEM
29extern unsigned long max_mapnr;
30#endif
31
32extern unsigned long num_physpages;
33extern unsigned long totalram_pages;
34extern void * high_memory;
35extern int page_cluster;
36
37#ifdef CONFIG_SYSCTL
38extern int sysctl_legacy_va_layout;
39#else
40#define sysctl_legacy_va_layout 0
41#endif
42
43#include <asm/page.h>
44#include <asm/pgtable.h>
45#include <asm/processor.h>
46
47#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

61extern struct kmem_cache *vm_area_cachep;
62
63#ifndef CONFIG_MMU
64extern struct rb_root nommu_region_tree;
65extern struct rw_semaphore nommu_region_sem;
66
67extern unsigned int kobjsize(const void *objp);
68#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 * mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits.
 */
73#define VM_NONE 0x00000000
74
75#define VM_READ 0x00000001
76#define VM_WRITE 0x00000002
77#define VM_EXEC 0x00000004
78#define VM_SHARED 0x00000008
79
80
81#define VM_MAYREAD 0x00000010
82#define VM_MAYWRITE 0x00000020
83#define VM_MAYEXEC 0x00000040
84#define VM_MAYSHARE 0x00000080
85
86#define VM_GROWSDOWN 0x00000100
87#define VM_PFNMAP 0x00000400
88#define VM_DENYWRITE 0x00000800
89
90#define VM_LOCKED 0x00002000
91#define VM_IO 0x00004000
92
93
94#define VM_SEQ_READ 0x00008000
95#define VM_RAND_READ 0x00010000
96
97#define VM_DONTCOPY 0x00020000
98#define VM_DONTEXPAND 0x00040000
99#define VM_ACCOUNT 0x00100000
100#define VM_NORESERVE 0x00200000
101#define VM_HUGETLB 0x00400000
102#define VM_NONLINEAR 0x00800000
103#define VM_ARCH_1 0x01000000
104#define VM_DONTDUMP 0x04000000
105
106#define VM_MIXEDMAP 0x10000000
107#define VM_HUGEPAGE 0x20000000
108#define VM_NOHUGEPAGE 0x40000000
109#define VM_MERGEABLE 0x80000000
110
111#if defined(CONFIG_X86)
112# define VM_PAT VM_ARCH_1
113#elif defined(CONFIG_PPC)
114# define VM_SAO VM_ARCH_1
115#elif defined(CONFIG_PARISC)
116# define VM_GROWSUP VM_ARCH_1
117#elif defined(CONFIG_IA64)
118# define VM_GROWSUP VM_ARCH_1
119#elif !defined(CONFIG_MMU)
120# define VM_MAPPED_COPY VM_ARCH_1
121#endif
122
123#ifndef VM_GROWSUP
124# define VM_GROWSUP VM_NONE
125#endif
126
127
128#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ)
129
130#ifndef VM_STACK_DEFAULT_FLAGS
131#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
132#endif
133
134#ifdef CONFIG_STACK_GROWSUP
135#define VM_STACK_FLAGS (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
136#else
137#define VM_STACK_FLAGS (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
138#endif
139
140#define VM_READHINTMASK (VM_SEQ_READ | VM_RAND_READ)
141#define VM_ClearReadHint(v) (v)->vm_flags &= ~VM_READHINTMASK
142#define VM_NormalReadHint(v) (!((v)->vm_flags & VM_READHINTMASK))
143#define VM_SequentialReadHint(v) ((v)->vm_flags & VM_SEQ_READ)
144#define VM_RandomReadHint(v) ((v)->vm_flags & VM_RAND_READ)

/*
 * Special vmas that are non-mergable, non-mlock()able.
 * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP)

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];
157
#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
#define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */
#define FAULT_FLAG_MKWRITE	0x04	/* Fault was mkwrite of existing pte */
#define FAULT_FLAG_ALLOW_RETRY	0x08	/* Retry fault if blocking */
#define FAULT_FLAG_RETRY_NOWAIT	0x10	/* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE	0x20	/* The fault task is in SIGKILL killable region */
#define FAULT_FLAG_TRIED	0x40	/* second try */

/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * pgoff should be used in favour of virtual_address, if possible. If pgoff
 * is used, one may implement ->remap_pages to get nonlinear mapping support.
 */
struct vm_fault {
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	void __user *virtual_address;	/* Faulting virtual address */

	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
};

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs that can switch between memory and hardware
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);
#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED; vma policies are protected by
	 * the mmap_sem, and the shared policy infrastructure in
	 * mm/mempolicy.c adds the reference automatically.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
	int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
		const nodemask_t *to, unsigned long flags);
#endif
	/* called by sys_remap_file_pages() to populate non-linear mapping */
	int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
			   unsigned long size, pgoff_t pgoff);
};
234
235struct mmu_gather;
236struct inode;
237
238#define page_private(page) ((page)->private)
239#define set_page_private(page, v) ((page)->private = (v))
240
241
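/*
 * The migratetype of a free page is cached in page->index while the page
 * sits on the buddy free lists; these helpers set and read that value.
 */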
242static inline void set_freepage_migratetype(struct page *page, int migratetype)
243{
244 page->index = migratetype;
245}
246
247
248static inline int get_freepage_migratetype(struct page *page)
249{
250 return page->index;
251}
252
253
254
255
256
257#include <linux/page-flags.h>
258#include <linux/huge_mm.h>
259

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
276static inline int put_page_testzero(struct page *page)
277{
278 VM_BUG_ON(atomic_read(&page->_count) == 0);
279 return atomic_dec_and_test(&page->_count);
280}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 */
286static inline int get_page_unless_zero(struct page *page)
287{
288 return atomic_inc_not_zero(&page->_count);
289}
290
291extern int page_is_ram(unsigned long pfn);
292
293
294struct page *vmalloc_to_page(const void *addr);
295unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
303static inline int is_vmalloc_addr(const void *x)
304{
305#ifdef CONFIG_MMU
306 unsigned long addr = (unsigned long)x;
307
308 return addr >= VMALLOC_START && addr < VMALLOC_END;
309#else
310 return 0;
311#endif
312}
313#ifdef CONFIG_MMU
314extern int is_vmalloc_or_module_addr(const void *x);
315#else
316static inline int is_vmalloc_or_module_addr(const void *x)
317{
318 return 0;
319}
320#endif
321
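/*
 * The PG_compound_lock bit spinlock serialises refcounting of THP tail
 * pages against __split_huge_page_refcount(); these helpers compile away
 * without CONFIG_TRANSPARENT_HUGEPAGE.
 */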
322static inline void compound_lock(struct page *page)
323{
324#ifdef CONFIG_TRANSPARENT_HUGEPAGE
325 VM_BUG_ON(PageSlab(page));
326 bit_spin_lock(PG_compound_lock, &page->flags);
327#endif
328}
329
330static inline void compound_unlock(struct page *page)
331{
332#ifdef CONFIG_TRANSPARENT_HUGEPAGE
333 VM_BUG_ON(PageSlab(page));
334 bit_spin_unlock(PG_compound_lock, &page->flags);
335#endif
336}
337
338static inline unsigned long compound_lock_irqsave(struct page *page)
339{
340 unsigned long uninitialized_var(flags);
341#ifdef CONFIG_TRANSPARENT_HUGEPAGE
342 local_irq_save(flags);
343 compound_lock(page);
344#endif
345 return flags;
346}
347
348static inline void compound_unlock_irqrestore(struct page *page,
349 unsigned long flags)
350{
351#ifdef CONFIG_TRANSPARENT_HUGEPAGE
352 compound_unlock(page);
353 local_irq_restore(flags);
354#endif
355}
356
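/* Return the head page of a compound page, or the page itself otherwise. */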
357static inline struct page *compound_head(struct page *page)
358{
359 if (unlikely(PageTail(page)))
360 return page->first_page;
361 return page;
362}

/*
 * The atomic page->_mapcount, starts from -1: so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */
369static inline void reset_page_mapcount(struct page *page)
370{
371 atomic_set(&(page)->_mapcount, -1);
372}
373
374static inline int page_mapcount(struct page *page)
375{
376 return atomic_read(&(page)->_mapcount) + 1;
377}
378
379static inline int page_count(struct page *page)
380{
381 return atomic_read(&compound_head(page)->_count);
382}
383
384static inline void get_huge_page_tail(struct page *page)
385{
	/*
	 * __split_huge_page_refcount() cannot run
	 * from under us.
	 */
390 VM_BUG_ON(page_mapcount(page) < 0);
391 VM_BUG_ON(atomic_read(&page->_count) != 0);
392 atomic_inc(&page->_mapcount);
393}
394
395extern bool __get_page_tail(struct page *page);
396
397static inline void get_page(struct page *page)
398{
399 if (unlikely(PageTail(page)))
400 if (likely(__get_page_tail(page)))
401 return;
	/*
	 * Getting a normal page or the head of a compound page
	 * requires to already have an elevated page->_count.
	 */
406 VM_BUG_ON(atomic_read(&page->_count) <= 0);
407 atomic_inc(&page->_count);
408}
409
410static inline struct page *virt_to_head_page(const void *x)
411{
412 struct page *page = virt_to_page(x);
413 return compound_head(page);
414}
415
/*
 * Setup the page count before being freed into the page allocator for
 * the first time (boot or memory hotplug)
 */
420static inline void init_page_count(struct page *page)
421{
422 atomic_set(&page->_count, 1);
423}
424
/*
 * PageBuddy() indicate that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 *
 * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
 * -2 so that an underflow of the page_mapcount() won't be mistaken
 * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
 * efficiently by most CPU architectures.
 */
434#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
435
436static inline int PageBuddy(struct page *page)
437{
438 return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
439}
440
441static inline void __SetPageBuddy(struct page *page)
442{
443 VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
444 atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
445}
446
447static inline void __ClearPageBuddy(struct page *page)
448{
449 VM_BUG_ON(!PageBuddy(page));
450 atomic_set(&page->_mapcount, -1);
451}
452
453void put_page(struct page *page);
454void put_pages_list(struct list_head *pages);
455
456void split_page(struct page *page, unsigned int order);
457int split_free_page(struct page *page);

/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a PG_compound page.
 */
464typedef void compound_page_dtor(struct page *);
465
466static inline void set_compound_page_dtor(struct page *page,
467 compound_page_dtor *dtor)
468{
469 page[1].lru.next = (void *)dtor;
470}
471
472static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
473{
474 return (compound_page_dtor *)page[1].lru.next;
475}
476
477static inline int compound_order(struct page *page)
478{
479 if (!PageHead(page))
480 return 0;
481 return (unsigned long)page[1].lru.prev;
482}
483
484static inline int compound_trans_order(struct page *page)
485{
486 int order;
487 unsigned long flags;
488
489 if (!PageHead(page))
490 return 0;
491
492 flags = compound_lock_irqsave(page);
493 order = compound_order(page);
494 compound_unlock_irqrestore(page, flags);
495 return order;
496}
497
498static inline void set_compound_order(struct page *page, unsigned long order)
499{
500 page[1].lru.prev = (void *)order;
501}
502
503#ifdef CONFIG_MMU

/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case we do always
 * want pte_mkwrite.  But get_user_pages can cause write faults for
 * mappings that do not have writing enabled, when used by
 * access_process_vm.
 */
510static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
511{
512 if (likely(vma->vm_flags & VM_WRITE))
513 pte = pte_mkwrite(pte);
514 return pte;
515}
516#endif

/*
 * Multiple processes may "see" the same page.  E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count:
 *   page_count() == 0 means the page is free; page->lru is then used for
 *   freelist management in the buddy allocator.
 *   page_count() > 0  means the page has been allocated.
 */

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */

/*
 * page->flags layout:
 *
 * There are three possibilities for how page->flags get laid out.  The
 * first is for the normal case, without sparsemem.  The second is for
 * sparsemem when there is plenty of space for node and section.  The
 * last is when we have run out of space and have to fall back to an
 * alternate (slower) way of determining the node.
 *
 * No sparsemem or sparsemem vmemmap: |       NODE     | ZONE | ... | FLAGS |
 * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS |
 * classic sparse no space for node:  | SECTION |     ZONE    | ... | FLAGS |
 */
598#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
599#define SECTIONS_WIDTH SECTIONS_SHIFT
600#else
601#define SECTIONS_WIDTH 0
602#endif
603
604#define ZONES_WIDTH ZONES_SHIFT
605
606#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
607#define NODES_WIDTH NODES_SHIFT
608#else
609#ifdef CONFIG_SPARSEMEM_VMEMMAP
610#error "Vmemmap: No space for nodes field in page flags"
611#endif
612#define NODES_WIDTH 0
613#endif

/* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)

/*
 * We are going to use the flags for the page to node mapping if its in
 * there.  This includes the case where there is no node, so it is implicit.
 */
624#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
625#define NODE_NOT_IN_PAGE_FLAGS
626#endif

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
638#ifdef NODE_NOT_IN_PAGE_FLAGS
639#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
640#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF)? \
641 SECTIONS_PGOFF : ZONES_PGOFF)
642#else
643#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT)
644#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF)? \
645 NODES_PGOFF : ZONES_PGOFF)
646#endif
647
648#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
649
650#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
651#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
652#endif
653
654#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
655#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
656#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
657#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
658
659static inline enum zone_type page_zonenum(const struct page *page)
660{
661 return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
662}

/*
 * The identification function is only used by the buddy allocator for
 * determining if two pages could be buddies.  We are not really identifying
 * the zone, since we could be using the section number id if we do not have
 * a node id available in page flags.
 * We only guarantee that it will return the same value for two combinable
 * pages in a zone.
 */
672static inline int page_zone_id(struct page *page)
673{
674 return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
675}
676
677static inline int zone_to_nid(struct zone *zone)
678{
679#ifdef CONFIG_NUMA
680 return zone->node;
681#else
682 return 0;
683#endif
684}
685
686#ifdef NODE_NOT_IN_PAGE_FLAGS
687extern int page_to_nid(const struct page *page);
688#else
689static inline int page_to_nid(const struct page *page)
690{
691 return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
692}
693#endif
694
695#ifdef CONFIG_NUMA_BALANCING
696static inline int page_xchg_last_nid(struct page *page, int nid)
697{
698 return xchg(&page->_last_nid, nid);
699}
700
701static inline int page_last_nid(struct page *page)
702{
703 return page->_last_nid;
704}
705static inline void reset_page_last_nid(struct page *page)
706{
707 page->_last_nid = -1;
708}
709#else
710static inline int page_xchg_last_nid(struct page *page, int nid)
711{
712 return page_to_nid(page);
713}
714
715static inline int page_last_nid(struct page *page)
716{
717 return page_to_nid(page);
718}
719
720static inline void reset_page_last_nid(struct page *page)
721{
722}
723#endif
724
725static inline struct zone *page_zone(const struct page *page)
726{
727 return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
728}
729
730#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
731static inline void set_page_section(struct page *page, unsigned long section)
732{
733 page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
734 page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
735}
736
737static inline unsigned long page_to_section(const struct page *page)
738{
739 return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
740}
741#endif
742
743static inline void set_page_zone(struct page *page, enum zone_type zone)
744{
745 page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
746 page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
747}
748
749static inline void set_page_node(struct page *page, unsigned long node)
750{
751 page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
752 page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
753}
754
755static inline void set_page_links(struct page *page, enum zone_type zone,
756 unsigned long node, unsigned long pfn)
757{
758 set_page_zone(page, zone);
759 set_page_node(page, node);
760#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
761 set_page_section(page, pfn_to_section_nr(pfn));
762#endif
763}

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
768#include <linux/vmstat.h>
769
770static __always_inline void *lowmem_page_address(const struct page *page)
771{
772 return __va(PFN_PHYS(page_to_pfn(page)));
773}
774
775#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
776#define HASHED_PAGE_VIRTUAL
777#endif
778
779#if defined(WANT_PAGE_VIRTUAL)
780#define page_address(page) ((page)->virtual)
781#define set_page_address(page, address) \
782 do { \
783 (page)->virtual = (address); \
784 } while(0)
785#define page_address_init() do { } while(0)
786#endif
787
788#if defined(HASHED_PAGE_VIRTUAL)
789void *page_address(const struct page *page);
790void set_page_address(struct page *page, void *virtual);
791void page_address_init(void);
792#endif
793
794#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
795#define page_address(page) lowmem_page_address(page)
796#define set_page_address(page, address) do { } while(0)
797#define page_address_init() do { } while(0)
798#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
 * and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	1
#define PAGE_MAPPING_KSM	2
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
819
820extern struct address_space swapper_space;
821static inline struct address_space *page_mapping(struct page *page)
822{
823 struct address_space *mapping = page->mapping;
824
825 VM_BUG_ON(PageSlab(page));
826 if (unlikely(PageSwapCache(page)))
827 mapping = &swapper_space;
828 else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
829 mapping = NULL;
830 return mapping;
831}
832
833
834static inline void *page_rmapping(struct page *page)
835{
836 return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
837}
838
839extern struct address_space *__page_file_mapping(struct page *);
840
841static inline
842struct address_space *page_file_mapping(struct page *page)
843{
844 if (unlikely(PageSwapCache(page)))
845 return __page_file_mapping(page);
846
847 return page->mapping;
848}
849
850static inline int PageAnon(struct page *page)
851{
852 return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
853}
854

/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use ->private
 */
859static inline pgoff_t page_index(struct page *page)
860{
861 if (unlikely(PageSwapCache(page)))
862 return page_private(page);
863 return page->index;
864}
865
866extern pgoff_t __page_file_index(struct page *page);
867
/*
 * Return the file index of the page. Regular pagecache pages use ->index
 * whereas swapcache pages use swp_offset(->private)
 */
872static inline pgoff_t page_file_index(struct page *page)
873{
874 if (unlikely(PageSwapCache(page)))
875 return __page_file_index(page);
876
877 return page->index;
878}

/*
 * Return true if this page is mapped into pagetables.
 */
static inline int page_mapped(struct page *page)
{
	return atomic_read(&(page)->_mapcount) >= 0;
}

/*
 * Different kinds of faults, as returned by handle_mm_fault().
 * Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 */

#define VM_FAULT_MINOR	0 /* For backwards compat. Remove me quickly. */

#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
#define VM_FAULT_HWPOISON_LARGE 0x0020	/* Hit poisoned large page. Index encoded in upper bits */

#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
#define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */

#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */

#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
			 VM_FAULT_HWPOISON_LARGE)

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
915
916
917
918
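/*
 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
 */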
919extern void pagefault_out_of_memory(void);
920
921#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)

/*
 * Flags passed to show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */
928
929extern void show_free_areas(unsigned int flags);
930extern bool skip_free_areas_node(unsigned int flags, int nid);
931
932int shmem_zero_setup(struct vm_area_struct *);
933
934extern int can_do_mlock(void);
935extern int user_shm_lock(size_t, struct user_struct *);
936extern void user_shm_unlock(size_t, struct user_struct *);
937

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct vm_area_struct *nonlinear_vma;	/* Check page->index if set */
	struct address_space *check_mapping;	/* Check page->mapping if set */
	pgoff_t first_index;			/* Lowest page->index to unmap */
	pgoff_t last_index;			/* Highest page->index to unmap */
};
947
948struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
949 pte_t pte);
950
951int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
952 unsigned long size);
953void zap_page_range(struct vm_area_struct *vma, unsigned long address,
954 unsigned long size, struct zap_details *);
955void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
956 unsigned long start, unsigned long end);

/**
 * mm_walk - callbacks for walk_page_range
 * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
 *	       this handler is required to be able to handle
 *	       pmd_trans_huge() pmds.  They may simply choose to
 *	       split_huge_page() instead of handling it explicitly.
 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
 * @pte_hole: if set, called for each hole at all levels
 * @hugetlb_entry: if set, called for each hugetlb entry
 *		   *Caution*: The caller must hold mmap_sem if @hugetlb_entry
 *			      is used.
 *
 * (see walk_page_range for more details)
 */
struct mm_walk {
	int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
	int (*hugetlb_entry)(pte_t *, unsigned long,
			     unsigned long, unsigned long, struct mm_walk *);
	struct mm_struct *mm;
	void *private;
};
985
986int walk_page_range(unsigned long addr, unsigned long end,
987 struct mm_walk *walk);
988void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
989 unsigned long end, unsigned long floor, unsigned long ceiling);
990int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
991 struct vm_area_struct *vma);
992void unmap_mapping_range(struct address_space *mapping,
993 loff_t const holebegin, loff_t const holelen, int even_cows);
994int follow_pfn(struct vm_area_struct *vma, unsigned long address,
995 unsigned long *pfn);
996int follow_phys(struct vm_area_struct *vma, unsigned long address,
997 unsigned int flags, unsigned long *prot, resource_size_t *phys);
998int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
999 void *buf, int len, int write);
1000
1001static inline void unmap_shared_mapping_range(struct address_space *mapping,
1002 loff_t const holebegin, loff_t const holelen)
1003{
1004 unmap_mapping_range(mapping, holebegin, holelen, 0);
1005}
1006
1007extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
1008extern void truncate_setsize(struct inode *inode, loff_t newsize);
1009void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
1010int truncate_inode_page(struct address_space *mapping, struct page *page);
1011int generic_error_remove_page(struct address_space *mapping, struct page *page);
1012int invalidate_inode_page(struct page *page);
1013
1014#ifdef CONFIG_MMU
1015extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
1016 unsigned long address, unsigned int flags);
1017extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
1018 unsigned long address, unsigned int fault_flags);
1019#else
1020static inline int handle_mm_fault(struct mm_struct *mm,
1021 struct vm_area_struct *vma, unsigned long address,
1022 unsigned int flags)
1023{
1024
1025 BUG();
1026 return VM_FAULT_SIGBUS;
1027}
1028static inline int fixup_user_fault(struct task_struct *tsk,
1029 struct mm_struct *mm, unsigned long address,
1030 unsigned int fault_flags)
1031{
1032
1033 BUG();
1034 return -EFAULT;
1035}
1036#endif
1037
1038extern int make_pages_present(unsigned long addr, unsigned long end);
1039extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
1040extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1041 void *buf, int len, int write);
1042
1043int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1044 unsigned long start, int len, unsigned int foll_flags,
1045 struct page **pages, struct vm_area_struct **vmas,
1046 int *nonblocking);
1047int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1048 unsigned long start, int nr_pages, int write, int force,
1049 struct page **pages, struct vm_area_struct **vmas);
1050int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1051 struct page **pages);
1052struct kvec;
1053int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
1054 struct page **pages);
1055int get_kernel_page(unsigned long start, int write, struct page **pages);
1056struct page *get_dump_page(unsigned long addr);
1057
1058extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
1059extern void do_invalidatepage(struct page *page, unsigned long offset);
1060
1061int __set_page_dirty_nobuffers(struct page *page);
1062int __set_page_dirty_no_writeback(struct page *page);
1063int redirty_page_for_writepage(struct writeback_control *wbc,
1064 struct page *page);
1065void account_page_dirtied(struct page *page, struct address_space *mapping);
1066void account_page_writeback(struct page *page);
1067int set_page_dirty(struct page *page);
1068int set_page_dirty_lock(struct page *page);
1069int clear_page_dirty_for_io(struct page *page);
1070
1071
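/* Is the vma a continuation of the stack vma above it? */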
1072static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
1073{
1074 return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
1075}
1076
1077static inline int stack_guard_page_start(struct vm_area_struct *vma,
1078 unsigned long addr)
1079{
1080 return (vma->vm_flags & VM_GROWSDOWN) &&
1081 (vma->vm_start == addr) &&
1082 !vma_growsdown(vma->vm_prev, addr);
1083}
1084
1085
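/* Is the vma a continuation of the stack vma below it? */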
1086static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
1087{
1088 return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
1089}
1090
1091static inline int stack_guard_page_end(struct vm_area_struct *vma,
1092 unsigned long addr)
1093{
1094 return (vma->vm_flags & VM_GROWSUP) &&
1095 (vma->vm_end == addr) &&
1096 !vma_growsup(vma->vm_next, addr);
1097}
1098
1099extern pid_t
1100vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
1101
1102extern unsigned long move_page_tables(struct vm_area_struct *vma,
1103 unsigned long old_addr, struct vm_area_struct *new_vma,
1104 unsigned long new_addr, unsigned long len,
1105 bool need_rmap_locks);
1106extern unsigned long do_mremap(unsigned long addr,
1107 unsigned long old_len, unsigned long new_len,
1108 unsigned long flags, unsigned long new_addr);
1109extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
1110 unsigned long end, pgprot_t newprot,
1111 int dirty_accountable, int prot_numa);
1112extern int mprotect_fixup(struct vm_area_struct *vma,
1113 struct vm_area_struct **pprev, unsigned long start,
1114 unsigned long end, unsigned long newflags);
1115
1116
1117
1118
1119int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
1120 struct page **pages);
1121
1122
1123
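/*
 * per-process(per-mm_struct) statistics.
 */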
1124static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
1125{
1126 long val = atomic_long_read(&mm->rss_stat.count[member]);
1127
1128#ifdef SPLIT_RSS_COUNTING
1129
1130
1131
1132
1133 if (val < 0)
1134 val = 0;
1135#endif
1136 return (unsigned long)val;
1137}
1138
1139static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
1140{
1141 atomic_long_add(value, &mm->rss_stat.count[member]);
1142}
1143
1144static inline void inc_mm_counter(struct mm_struct *mm, int member)
1145{
1146 atomic_long_inc(&mm->rss_stat.count[member]);
1147}
1148
1149static inline void dec_mm_counter(struct mm_struct *mm, int member)
1150{
1151 atomic_long_dec(&mm->rss_stat.count[member]);
1152}
1153
1154static inline unsigned long get_mm_rss(struct mm_struct *mm)
1155{
1156 return get_mm_counter(mm, MM_FILEPAGES) +
1157 get_mm_counter(mm, MM_ANONPAGES);
1158}
1159
1160static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
1161{
1162 return max(mm->hiwater_rss, get_mm_rss(mm));
1163}
1164
1165static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
1166{
1167 return max(mm->hiwater_vm, mm->total_vm);
1168}
1169
1170static inline void update_hiwater_rss(struct mm_struct *mm)
1171{
1172 unsigned long _rss = get_mm_rss(mm);
1173
1174 if ((mm)->hiwater_rss < _rss)
1175 (mm)->hiwater_rss = _rss;
1176}
1177
1178static inline void update_hiwater_vm(struct mm_struct *mm)
1179{
1180 if (mm->hiwater_vm < mm->total_vm)
1181 mm->hiwater_vm = mm->total_vm;
1182}
1183
1184static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
1185 struct mm_struct *mm)
1186{
1187 unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
1188
1189 if (*maxrss < hiwater_rss)
1190 *maxrss = hiwater_rss;
1191}
1192
1193#if defined(SPLIT_RSS_COUNTING)
1194void sync_mm_rss(struct mm_struct *mm);
1195#else
1196static inline void sync_mm_rss(struct mm_struct *mm)
1197{
1198}
1199#endif
1200
1201int vma_wants_writenotify(struct vm_area_struct *vma);
1202
1203extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1204 spinlock_t **ptl);
1205static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
1206 spinlock_t **ptl)
1207{
1208 pte_t *ptep;
1209 __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
1210 return ptep;
1211}
1212
1213#ifdef __PAGETABLE_PUD_FOLDED
1214static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
1215 unsigned long address)
1216{
1217 return 0;
1218}
1219#else
1220int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
1221#endif
1222
1223#ifdef __PAGETABLE_PMD_FOLDED
1224static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
1225 unsigned long address)
1226{
1227 return 0;
1228}
1229#else
1230int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
1231#endif
1232
1233int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
1234 pmd_t *pmd, unsigned long address);
1235int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
1236
1237
1238
1239
1240
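/*
 * The following ifdef needed to get the 4level-fixup.h header to work.
 * Remove it when 4level-fixup.h has been removed.
 */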
1241#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
1242static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
1243{
1244 return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
1245 NULL: pud_offset(pgd, address);
1246}
1247
1248static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
1249{
1250 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
1251 NULL: pmd_offset(pud, address);
1252}
1253#endif
1254
1255#if USE_SPLIT_PTLOCKS
1256
1257
1258
1259
1260
1261
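/*
 * In the split ptlock case, a spinlock guarding each pagetable page is
 * tucked into its struct page (the ptl field, which unions with
 * page->private).  pte_lock_deinit() resets page->mapping so that
 * free_pages_check() won't complain.
 */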
1262#define __pte_lockptr(page) &((page)->ptl)
1263#define pte_lock_init(_page) do { \
1264 spin_lock_init(__pte_lockptr(_page)); \
1265} while (0)
1266#define pte_lock_deinit(page) ((page)->mapping = NULL)
1267#define pte_lockptr(mm, pmd) ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
1268#else
1269
1270
1271
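/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */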
1272#define pte_lock_init(page) do {} while (0)
1273#define pte_lock_deinit(page) do {} while (0)
1274#define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;})
1275#endif
1276
1277static inline void pgtable_page_ctor(struct page *page)
1278{
1279 pte_lock_init(page);
1280 inc_zone_page_state(page, NR_PAGETABLE);
1281}
1282
1283static inline void pgtable_page_dtor(struct page *page)
1284{
1285 pte_lock_deinit(page);
1286 dec_zone_page_state(page, NR_PAGETABLE);
1287}
1288
1289#define pte_offset_map_lock(mm, pmd, address, ptlp) \
1290({ \
1291 spinlock_t *__ptl = pte_lockptr(mm, pmd); \
1292 pte_t *__pte = pte_offset_map(pmd, address); \
1293 *(ptlp) = __ptl; \
1294 spin_lock(__ptl); \
1295 __pte; \
1296})
1297
1298#define pte_unmap_unlock(pte, ptl) do { \
1299 spin_unlock(ptl); \
1300 pte_unmap(pte); \
1301} while (0)
1302
1303#define pte_alloc_map(mm, vma, pmd, address) \
1304 ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma, \
1305 pmd, address))? \
1306 NULL: pte_offset_map(pmd, address))
1307
1308#define pte_alloc_map_lock(mm, pmd, address, ptlp) \
1309 ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL, \
1310 pmd, address))? \
1311 NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
1312
1313#define pte_alloc_kernel(pmd, address) \
1314 ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
1315 NULL: pte_offset_kernel(pmd, address))
1316
1317extern void free_area_init(unsigned long * zones_size);
1318extern void free_area_init_node(int nid, unsigned long * zones_size,
1319 unsigned long zone_start_pfn, unsigned long *zholes_size);
1320extern void free_initmem(void);
1321
1322#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise all
 * of its zones in an architecture-independent manner: it registers the
 * physical memory ranges it owns (and the node each range belongs to) with
 * memblock and then calls free_area_init_nodes() with the maximum PFN of
 * each zone.  Zone sizes and memory holes are worked out from the registered
 * ranges; the helpers declared below query that information.
 */
1349extern void free_area_init_nodes(unsigned long *max_zone_pfn);
1350unsigned long node_map_pfn_alignment(void);
1351unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
1352 unsigned long end_pfn);
1353extern unsigned long absent_pages_in_range(unsigned long start_pfn,
1354 unsigned long end_pfn);
1355extern void get_pfn_range_for_nid(unsigned int nid,
1356 unsigned long *start_pfn, unsigned long *end_pfn);
1357extern unsigned long find_min_pfn_with_active_regions(void);
1358extern void free_bootmem_with_active_regions(int nid,
1359 unsigned long max_low_pfn);
1360extern void sparse_memory_present_with_active_regions(int nid);
1361
1362#endif
1363
1364#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
1365 !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
1366static inline int __early_pfn_to_nid(unsigned long pfn)
1367{
1368 return 0;
1369}
1370#else
1371
1372extern int __meminit early_pfn_to_nid(unsigned long pfn);
1373#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
1374
1375extern int __meminit __early_pfn_to_nid(unsigned long pfn);
1376#endif
1377#endif
1378
1379extern void set_dma_reserve(unsigned long new_dma_reserve);
1380extern void memmap_init_zone(unsigned long, int, unsigned long,
1381 unsigned long, enum memmap_context);
1382extern void setup_per_zone_wmarks(void);
1383extern int __meminit init_per_zone_wmark_min(void);
1384extern void mem_init(void);
1385extern void __init mmap_init(void);
1386extern void show_mem(unsigned int flags);
1387extern void si_meminfo(struct sysinfo * val);
1388extern void si_meminfo_node(struct sysinfo *val, int nid);
1389extern int after_bootmem;
1390
1391extern __printf(3, 4)
1392void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
1393
1394extern void setup_per_cpu_pageset(void);
1395
1396extern void zone_pcp_update(struct zone *zone);
1397extern void zone_pcp_reset(struct zone *zone);
1398
1399
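/* nommu.c */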
1400extern atomic_long_t mmap_pages_allocated;
1401extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
1402
1403
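/* interval_tree.c */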
1404void vma_interval_tree_insert(struct vm_area_struct *node,
1405 struct rb_root *root);
1406void vma_interval_tree_insert_after(struct vm_area_struct *node,
1407 struct vm_area_struct *prev,
1408 struct rb_root *root);
1409void vma_interval_tree_remove(struct vm_area_struct *node,
1410 struct rb_root *root);
1411struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root,
1412 unsigned long start, unsigned long last);
1413struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
1414 unsigned long start, unsigned long last);
1415
1416#define vma_interval_tree_foreach(vma, root, start, last) \
1417 for (vma = vma_interval_tree_iter_first(root, start, last); \
1418 vma; vma = vma_interval_tree_iter_next(vma, start, last))
1419
1420static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
1421 struct list_head *list)
1422{
1423 list_add_tail(&vma->shared.nonlinear, list);
1424}
1425
1426void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
1427 struct rb_root *root);
1428void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
1429 struct rb_root *root);
1430struct anon_vma_chain *anon_vma_interval_tree_iter_first(
1431 struct rb_root *root, unsigned long start, unsigned long last);
1432struct anon_vma_chain *anon_vma_interval_tree_iter_next(
1433 struct anon_vma_chain *node, unsigned long start, unsigned long last);
1434#ifdef CONFIG_DEBUG_VM_RB
1435void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
1436#endif
1437
1438#define anon_vma_interval_tree_foreach(avc, root, start, last) \
1439 for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
1440 avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
1441
1442
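/* mmap.c */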
1443extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
1444extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
1445 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
1446extern struct vm_area_struct *vma_merge(struct mm_struct *,
1447 struct vm_area_struct *prev, unsigned long addr, unsigned long end,
1448 unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
1449 struct mempolicy *);
1450extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
1451extern int split_vma(struct mm_struct *,
1452 struct vm_area_struct *, unsigned long addr, int new_below);
1453extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
1454extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
1455 struct rb_node **, struct rb_node *);
1456extern void unlink_file_vma(struct vm_area_struct *);
1457extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
1458 unsigned long addr, unsigned long len, pgoff_t pgoff,
1459 bool *need_rmap_locks);
1460extern void exit_mmap(struct mm_struct *);
1461
1462extern int mm_take_all_locks(struct mm_struct *mm);
1463extern void mm_drop_all_locks(struct mm_struct *mm);
1464
1465extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
1466extern struct file *get_mm_exe_file(struct mm_struct *mm);
1467
1468extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
1469extern int install_special_mapping(struct mm_struct *mm,
1470 unsigned long addr, unsigned long len,
1471 unsigned long flags, struct page **pages);
1472
1473extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
1474
1475extern unsigned long mmap_region(struct file *file, unsigned long addr,
1476 unsigned long len, unsigned long flags,
1477 vm_flags_t vm_flags, unsigned long pgoff);
1478extern unsigned long do_mmap_pgoff(struct file *, unsigned long,
1479 unsigned long, unsigned long,
1480 unsigned long, unsigned long);
1481extern int do_munmap(struct mm_struct *, unsigned long, size_t);
1482
1483
1484extern unsigned long vm_brk(unsigned long, unsigned long);
1485extern int vm_munmap(unsigned long, size_t);
1486extern unsigned long vm_mmap(struct file *, unsigned long,
1487 unsigned long, unsigned long,
1488 unsigned long, unsigned long);
1489
1490struct vm_unmapped_area_info {
1491#define VM_UNMAPPED_AREA_TOPDOWN 1
1492 unsigned long flags;
1493 unsigned long length;
1494 unsigned long low_limit;
1495 unsigned long high_limit;
1496 unsigned long align_mask;
1497 unsigned long align_offset;
1498};
1499
1500extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
1501extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
1502
/*
 * Search for an unmapped address range.
 *
 * We are looking for a range that:
 * - does not intersect with any VMA;
 * - is contained within the [low_limit, high_limit) interval;
 * - is at least the desired size.
 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
 */
1512static inline unsigned long
1513vm_unmapped_area(struct vm_unmapped_area_info *info)
1514{
1515 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
1516 return unmapped_area(info);
1517 else
1518 return unmapped_area_topdown(info);
1519}
1520
1521
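/* truncate.c */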
1522extern void truncate_inode_pages(struct address_space *, loff_t);
1523extern void truncate_inode_pages_range(struct address_space *,
1524 loff_t lstart, loff_t lend);
1525
1526
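/* generic vm_area_ops exported for stackable file systems */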
1527extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
1528extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
1529
1530
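/* mm/page-writeback.c */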
1531int write_one_page(struct page *page, int wait);
1532void task_dirty_inc(struct task_struct *tsk);

/* readahead.c */
#define VM_MAX_READAHEAD	128	/* kbytes */
#define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */
1537
1538int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
1539 pgoff_t offset, unsigned long nr_to_read);
1540
1541void page_cache_sync_readahead(struct address_space *mapping,
1542 struct file_ra_state *ra,
1543 struct file *filp,
1544 pgoff_t offset,
1545 unsigned long size);
1546
1547void page_cache_async_readahead(struct address_space *mapping,
1548 struct file_ra_state *ra,
1549 struct file *filp,
1550 struct page *pg,
1551 pgoff_t offset,
1552 unsigned long size);
1553
1554unsigned long max_sane_readahead(unsigned long nr);
1555unsigned long ra_submit(struct file_ra_state *ra,
1556 struct address_space *mapping,
1557 struct file *filp);
1558
1559
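/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */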
1560extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
1561
1562
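/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */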
1563extern int expand_downwards(struct vm_area_struct *vma,
1564 unsigned long address);
1565#if VM_GROWSUP
1566extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
1567#else
1568 #define expand_upwards(vma, address) do { } while (0)
1569#endif
1570
1571
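/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */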
1572extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
1573extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
1574 struct vm_area_struct **pprev);
1575
1576
1577
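/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none.  Assume start_addr < end_addr. */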
1578static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
1579{
1580 struct vm_area_struct * vma = find_vma(mm,start_addr);
1581
1582 if (vma && end_addr <= vma->vm_start)
1583 vma = NULL;
1584 return vma;
1585}
1586
1587static inline unsigned long vma_pages(struct vm_area_struct *vma)
1588{
1589 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
1590}
1591
1592
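/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */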
1593static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
1594 unsigned long vm_start, unsigned long vm_end)
1595{
1596 struct vm_area_struct *vma = find_vma(mm, vm_start);
1597
1598 if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
1599 vma = NULL;
1600
1601 return vma;
1602}
1603
1604#ifdef CONFIG_MMU
1605pgprot_t vm_get_page_prot(unsigned long vm_flags);
1606#else
1607static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
1608{
1609 return __pgprot(0);
1610}
1611#endif
1612
1613#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
1614unsigned long change_prot_numa(struct vm_area_struct *vma,
1615 unsigned long start, unsigned long end);
1616#endif
1617
1618struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
1619int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
1620 unsigned long pfn, unsigned long size, pgprot_t);
1621int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
1622int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1623 unsigned long pfn);
1624int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
1625 unsigned long pfn);
1626
1627struct page *follow_page(struct vm_area_struct *, unsigned long address,
1628 unsigned int foll_flags);
#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_TOUCH	0x02	/* mark page accessed */
#define FOLL_GET	0x04	/* do get_page on page */
#define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
#define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
#define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
				 * and return without waiting upon it */
#define FOLL_MLOCK	0x40	/* mark page as mlocked */
#define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
#define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
#define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
1640
1641typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
1642 void *data);
1643extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
1644 unsigned long size, pte_fn_t fn, void *data);
1645
1646#ifdef CONFIG_PROC_FS
1647void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
1648#else
1649static inline void vm_stat_account(struct mm_struct *mm,
1650 unsigned long flags, struct file *file, long pages)
1651{
1652 mm->total_vm += pages;
1653}
1654#endif
1655
1656#ifdef CONFIG_DEBUG_PAGEALLOC
1657extern void kernel_map_pages(struct page *page, int numpages, int enable);
1658#ifdef CONFIG_HIBERNATION
1659extern bool kernel_page_present(struct page *page);
1660#endif
1661#else
1662static inline void
1663kernel_map_pages(struct page *page, int numpages, int enable) {}
1664#ifdef CONFIG_HIBERNATION
1665static inline bool kernel_page_present(struct page *page) { return true; }
1666#endif
1667#endif
1668
1669extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
1670#ifdef __HAVE_ARCH_GATE_AREA
1671int in_gate_area_no_mm(unsigned long addr);
1672int in_gate_area(struct mm_struct *mm, unsigned long addr);
1673#else
1674int in_gate_area_no_mm(unsigned long addr);
1675#define in_gate_area(mm, addr) ({(void)mm; in_gate_area_no_mm(addr);})
1676#endif
1677
1678int drop_caches_sysctl_handler(struct ctl_table *, int,
1679 void __user *, size_t *, loff_t *);
1680unsigned long shrink_slab(struct shrink_control *shrink,
1681 unsigned long nr_pages_scanned,
1682 unsigned long lru_pages);
1683
1684#ifndef CONFIG_MMU
1685#define randomize_va_space 0
1686#else
1687extern int randomize_va_space;
1688#endif
1689
1690const char * arch_vma_name(struct vm_area_struct *vma);
1691void print_vma_addr(char *prefix, unsigned long rip);
1692
1693void sparse_mem_maps_populate_node(struct page **map_map,
1694 unsigned long pnum_begin,
1695 unsigned long pnum_end,
1696 unsigned long map_count,
1697 int nodeid);
1698
1699struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
1700pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
1701pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
1702pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
1703pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
1704void *vmemmap_alloc_block(unsigned long size, int node);
1705void *vmemmap_alloc_block_buf(unsigned long size, int node);
1706void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
1707int vmemmap_populate_basepages(struct page *start_page,
1708 unsigned long pages, int node);
1709int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
1710void vmemmap_populate_print_last(void);
1711
1712
1713enum mf_flags {
1714 MF_COUNT_INCREASED = 1 << 0,
1715 MF_ACTION_REQUIRED = 1 << 1,
1716 MF_MUST_KILL = 1 << 2,
1717};
1718extern int memory_failure(unsigned long pfn, int trapno, int flags);
1719extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
1720extern int unpoison_memory(unsigned long pfn);
1721extern int sysctl_memory_failure_early_kill;
1722extern int sysctl_memory_failure_recovery;
1723extern void shake_page(struct page *p, int access);
1724extern atomic_long_t mce_bad_pages;
1725extern int soft_offline_page(struct page *page, int flags);
1726
1727extern void dump_page(struct page *page);
1728
1729#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
1730extern void clear_huge_page(struct page *page,
1731 unsigned long addr,
1732 unsigned int pages_per_huge_page);
1733extern void copy_user_huge_page(struct page *dst, struct page *src,
1734 unsigned long addr, struct vm_area_struct *vma,
1735 unsigned int pages_per_huge_page);
1736#endif
1737
1738#ifdef CONFIG_DEBUG_PAGEALLOC
1739extern unsigned int _debug_guardpage_minorder;
1740
1741static inline unsigned int debug_guardpage_minorder(void)
1742{
1743 return _debug_guardpage_minorder;
1744}
1745
1746static inline bool page_is_guard(struct page *page)
1747{
1748 return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
1749}
1750#else
1751static inline unsigned int debug_guardpage_minorder(void) { return 0; }
1752static inline bool page_is_guard(struct page *page) { return false; }
1753#endif
1754
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
1757