#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/prio_tree.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>

struct mempolicy;
struct anon_vma;
struct file_ra_state;
struct user_struct;
struct writeback_control;
struct rlimit;

#ifndef CONFIG_DISCONTIGMEM		/* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
#endif

extern unsigned long num_physpages;
extern unsigned long totalram_pages;
extern void * high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
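/*
 * Usage sketch (illustrative, not part of the header proper): PAGE_ALIGN()
 * rounds a length or address up to the next page boundary, and nth_page()
 * steps through a mem_map that may not be virtually contiguous.
 *
 *	len  = PAGE_ALIGN(len);			e.g. 0x1234 -> 0x2000 with 4K pages
 *	page = nth_page(first_page, 2);		third page of a contiguous pfn run
 */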

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

extern struct kmem_cache *vm_area_cachep;

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 */
#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_GROWSUP	0x00000200
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE	0x00001000
#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_RESERVED	0x00080000	/* Count as reserved_vm like IO */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
#define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
#define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it */
#define VM_ALWAYSDUMP	0x04000000	/* Always include in core dumps */

#define VM_CAN_NONLINEAR 0x08000000	/* Has ->fault & does nonlinear pages */
#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_SAO		0x20000000	/* Strong Access Ordering (powerpc) */
#define VM_PFN_AT_MMAP	0x40000000	/* PFNMAP vma that is fully mapped at mmap time */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS	(VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#else
#define VM_STACK_FLAGS	(VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif

#define VM_READHINTMASK			(VM_SEQ_READ | VM_RAND_READ)
#define VM_ClearReadHint(v)		(v)->vm_flags &= ~VM_READHINTMASK
#define VM_NormalReadHint(v)		(!((v)->vm_flags & VM_READHINTMASK))
#define VM_SequentialReadHint(v)	((v)->vm_flags & VM_SEQ_READ)
#define VM_RandomReadHint(v)		((v)->vm_flags & VM_RAND_READ)
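/*
 * Sketch (illustrative): madvise(MADV_SEQUENTIAL/MADV_RANDOM) sets these
 * hints in vma->vm_flags, and the readahead code keys off them.
 *
 *	if (VM_SequentialReadHint(vma))
 *		... read ahead aggressively ...
 *	else if (VM_RandomReadHint(vma))
 *		... skip readahead ...
 */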

/*
 * Special vmas that are non-mergable, non-mlock()able.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];

#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
#define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */
#define FAULT_FLAG_MKWRITE	0x04	/* Fault was mkwrite of existing pte */

/*
 * This interface is used by the x86 PAT code to identify a pfn mapping
 * that is linear over the entire vma (pure pfn, no struct page), so that
 * the whole region can be tracked at once instead of page by page.
 */
static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_PFN_AT_MMAP);
}

static inline int is_pfn_mapping(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_PFNMAP);
}

/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function.  The vma's ->fault is responsible for returning a
 * bitmask of VM_FAULT_xxx flags that give details about how the fault
 * was handled.
 *
 * pgoff should be used in favour of virtual_address, if possible.
 */
struct vm_fault {
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	void __user *virtual_address;	/* Faulting virtual address */

	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
};

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs that can switch between memory and hardware
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);
#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add a reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED.  vma policies are protected by the mmap_sem.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
	int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
		const nodemask_t *to, unsigned long flags);
#endif
};
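/*
 * Illustrative sketch (not part of this header): a minimal fault-based
 * mapping.  The names my_fault/my_vm_ops are hypothetical.
 *
 *	static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		struct page *page = alloc_page(GFP_KERNEL);
 *		if (!page)
 *			return VM_FAULT_OOM;
 *		clear_highpage(page);
 *		vmf->page = page;	- reference handed back to the core
 *		return 0;
 *	}
 *
 *	static const struct vm_operations_struct my_vm_ops = {
 *		.fault = my_fault,
 *	};
 */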

struct mmu_gather;
struct inode;

#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

/*
 * FIXME: take this include out, include page-flags.h in
 * the files which need it.
 */
#include <linux/page-flags.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON(atomic_read(&page->_count) == 0);
	return atomic_dec_and_test(&page->_count);
}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 */
static inline int get_page_unless_zero(struct page *page)
{
	return atomic_inc_not_zero(&page->_count);
}
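/*
 * Sketch (illustrative): lockless lookups take a speculative reference and
 * back off if the page was already headed for the allocator.
 *
 *	if (!get_page_unless_zero(page))
 *		return NULL;	- lost the race, page is being freed
 *	... use the page ...
 *	put_page(page);
 */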

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range.
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
static inline int is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return 0;
#endif
}
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

static inline struct page *compound_head(struct page *page)
{
	if (unlikely(PageTail(page)))
		return page->first_page;
	return page;
}

static inline int page_count(struct page *page)
{
	return atomic_read(&compound_head(page)->_count);
}

static inline void get_page(struct page *page)
{
	page = compound_head(page);
	VM_BUG_ON(atomic_read(&page->_count) == 0);
	atomic_inc(&page->_count);
}

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);
	return compound_head(page);
}

/*
 * Setup the page count before being freed into the page allocator for
 * the first time (boot or memory hotplug)
 */
static inline void init_page_count(struct page *page)
{
	atomic_set(&page->_count, 1);
}

void put_page(struct page *page);
void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);

/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a PG_compound page.
 */
typedef void compound_page_dtor(struct page *);

static inline void set_compound_page_dtor(struct page *page,
						compound_page_dtor *dtor)
{
	page[1].lru.next = (void *)dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	return (compound_page_dtor *)page[1].lru.next;
}

static inline int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return (unsigned long)page[1].lru.prev;
}

static inline void set_compound_order(struct page *page, unsigned long order)
{
	page[1].lru.prev = (void *)order;
}
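/*
 * Sketch (illustrative): the second page of a compound page carries the
 * destructor and order in its otherwise-unused lru fields; hugetlb, for
 * example, installs its own destructor this way when preparing a huge page.
 *
 *	set_compound_page_dtor(page, free_huge_page);	- done by hugetlb code
 *	nr_pages = 1 << compound_order(page);		- 1 for an ordinary page
 */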

/*
 * Multiple processes may "see" the same page.  For the non-reserved pages,
 * page_count(page) is a reference count: zero means the page is free and
 * owned by the buddy allocator, positive means it is in use.  A pagecache
 * page has page->mapping pointing at the owning address_space and
 * page->index holding the offset within the file; the opaque `private'
 * field belongs to that address_space (buffer_heads, swap entries, ...).
 */

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */

/*
 * page->flags layout:
 *
 * There are three possibilities for how page->flags get laid out.  The
 * first is for the normal case, without sparsemem.  The second is for
 * sparsemem when there is plenty of space for node and section.  The
 * last is when there is insufficient space in page->flags and a separate
 * lookup is necessary.
 *
 * No sparsemem or sparsemem vmemmap: |       NODE     | ZONE | ... | FLAGS |
 * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS |
 * classic sparse no space for node:  | SECTION |     ZONE    | ... | FLAGS |
 */
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTIONS_WIDTH		SECTIONS_SHIFT
#else
#define SECTIONS_WIDTH		0
#endif

#define ZONES_WIDTH		ZONES_SHIFT

#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
#define NODES_WIDTH		NODES_SHIFT
#else
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#error "Vmemmap: No space for nodes field in page flags"
#endif
#define NODES_WIDTH		0
#endif

/* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)

/*
 * We are going to use the flags for the page to node mapping if its in
 * there.  This includes the case where there is no node, so it is implicit.
 */
#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
#define NODE_NOT_IN_PAGE_FLAGS
#endif

#ifndef PFN_SECTION_SHIFT
#define PFN_SECTION_SHIFT 0
#endif

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

/*
 * The identification function is used by the buddy allocator only to
 * determine whether two pages could be buddies.  It does not really
 * identify a zone: when no node id is available in page flags the
 * section number is used instead.  All that is guaranteed is that it
 * returns the same value for any two combinable pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
	return zone->node;
#else
	return 0;
#endif
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(struct page *page);
#else
static inline int page_to_nid(struct page *page)
{
	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

static inline struct zone *page_zone(struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
static inline unsigned long page_to_section(struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
	set_page_section(page, pfn_to_section_nr(pfn));
}
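/*
 * Sketch (illustrative): memmap initialisation packs zone/node/section
 * into page->flags with set_page_links(); the accessors above recover
 * them later.
 *
 *	set_page_links(page, zone_type, nid, pfn);	- done in memmap_init_zone()
 *	...
 *	nid  = page_to_nid(page);
 *	zone = page_zone(page);
 */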

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(struct page *page)
{
	return __va(page_to_pfn(page) << PAGE_SHIFT);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
#define page_address(page) ((page)->virtual)
#define set_page_address(page, address)			\
	do {						\
		(page)->virtual = (address);		\
	} while(0)
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	1

extern struct address_space swapper_space;
static inline struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping = page->mapping;

	VM_BUG_ON(PageSlab(page));
#ifdef CONFIG_SWAP
	if (unlikely(PageSwapCache(page)))
		mapping = &swapper_space;
	else
#endif
	if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
		mapping = NULL;
	return mapping;
}

static inline int PageAnon(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use ->private
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return page_private(page);
	return page->index;
}
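/*
 * Sketch (illustrative): recovering the owning file mapping and offset of
 * a pagecache page; anonymous pages report a NULL mapping here because
 * their ->mapping field is overloaded with the anon_vma pointer.
 *
 *	struct address_space *mapping = page_mapping(page);
 *	pgoff_t index = page_index(page);
 *	if (mapping)
 *		... 'page' caches 'mapping' at offset 'index' ...
 */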

/*
 * The atomic page->_mapcount, like _count, starts from -1:
 * so that transitions both from it and to it can be tracked,
 * using atomic_inc_and_test and atomic_add_negative(-1).
 */
static inline void reset_page_mapcount(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

static inline int page_mapcount(struct page *page)
{
	return atomic_read(&(page)->_mapcount) + 1;
}

/*
 * Return true if this page is mapped into pagetables.
 */
static inline int page_mapped(struct page *page)
{
	return atomic_read(&(page)->_mapcount) >= 0;
}

/*
 * Different kinds of faults, as returned by handle_mm_fault().
 * Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 */

#define VM_FAULT_MINOR	0 /* For backwards compat. Remove me quickly. */

#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned page */

#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */

#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON)

/*
 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
 */
extern void pagefault_out_of_memory(void);

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

extern void show_free_areas(void);

int shmem_lock(struct file *file, int lock, struct user_struct *user);
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
int shmem_zero_setup(struct vm_area_struct *);

#ifndef CONFIG_MMU
extern unsigned long shmem_get_unmapped_area(struct file *file,
					     unsigned long addr,
					     unsigned long len,
					     unsigned long pgoff,
					     unsigned long flags);
#endif

extern int can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct vm_area_struct *nonlinear_vma;	/* Check page->index if set */
	struct address_space *check_mapping;	/* Check page->mapping if set */
	pgoff_t	first_index;			/* Lowest page->index to unmap */
	pgoff_t last_index;			/* Highest page->index to unmap */
	spinlock_t *i_mmap_lock;		/* For unmap_mapping_range: */
	unsigned long truncate_count;		/* Compare vm_truncate_count */
};

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
		pte_t pte);

int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size);
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *);
unsigned long unmap_vmas(struct mmu_gather **tlb,
		struct vm_area_struct *start_vma, unsigned long start_addr,
		unsigned long end_addr, unsigned long *nr_accounted,
		struct zap_details *);

/**
 * mm_walk - callbacks for walk_page_range
 * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
 * @pte_hole: if set, called for each hole at all levels
 *
 * (see walk_page_range for more details)
 */
struct mm_walk {
	int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
	struct mm_struct *mm;
	void *private;
};

int walk_page_range(unsigned long addr, unsigned long end,
		struct mm_walk *walk);
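/*
 * Illustrative sketch (hypothetical callback): counting present ptes in a
 * range of a given mm with walk_page_range(), mmap_sem held for reading.
 *
 *	static int count_pte(pte_t *pte, unsigned long addr,
 *			     unsigned long next, struct mm_walk *walk)
 *	{
 *		unsigned long *count = walk->private;
 *		if (pte_present(*pte))
 *			(*count)++;
 *		return 0;		- non-zero would abort the walk
 *	}
 *
 *	struct mm_walk walk = {
 *		.pte_entry	= count_pte,
 *		.mm		= mm,
 *		.private	= &count,
 *	};
 *	walk_page_range(start, end, &walk);
 */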
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write);

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
extern int vmtruncate(struct inode *inode, loff_t offset);
extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end);

int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);

int invalidate_inode_page(struct page *page);

#ifdef CONFIG_MMU
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
#else
static inline int handle_mm_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
#endif

extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);

int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
			unsigned long start, int nr_pages, int write, int force,
			struct page **pages, struct vm_area_struct **vmas);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages);
struct page *get_dump_page(unsigned long addr);
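/*
 * Sketch (illustrative): pinning one writable user page for I/O with the
 * fast path, then dropping the reference when done.
 *
 *	struct page *page;
 *
 *	if (get_user_pages_fast(uaddr & PAGE_MASK, 1, 1, &page) == 1) {
 *		... kmap()/DMA-map the page, do the I/O ...
 *		put_page(page);
 *	}
 */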

extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned long offset);

int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len);
extern unsigned long do_mremap(unsigned long addr,
			       unsigned long old_len, unsigned long new_len,
			       unsigned long flags, unsigned long new_addr);
extern int mprotect_fixup(struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

/*
 * doesn't attempt to fault and will return short.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages);

/*
 * A callback you can register to apply pressure to ageable caches.
 *
 * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'.  It should
 * look through the least-recently-used 'nr_to_scan' entries and
 * attempt to free them up.  It should return the number of objects
 * which remain in the cache.  If it returns -1, it means it cannot do
 * any scanning at this time (eg. there is a risk of deadlock).
 *
 * The 'gfpmask' refers to the allocation we are currently trying to
 * fulfil.
 *
 * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is
 * querying the cache size, so a fastpath for that case is appropriate.
 */
struct shrinker {
	int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
	int seeks;	/* seeks to recreate an obj */

	/* These are for internal use */
	struct list_head list;
	long nr;	/* objs pending delete */
};
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
extern void register_shrinker(struct shrinker *);
extern void unregister_shrinker(struct shrinker *);
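/*
 * Illustrative sketch (hypothetical cache): a shrinker is queried with
 * nr_to_scan == 0 for the current size, otherwise told how many objects
 * to try to free; it returns the number of objects still cached.
 *
 *	static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
 *	{
 *		if (nr_to_scan)
 *			... free up to nr_to_scan cached objects ...
 *		return my_cache_count();	- objects still cached
 *	}
 *
 *	static struct shrinker my_shrinker = {
 *		.shrink	= my_cache_shrink,
 *		.seeks	= DEFAULT_SEEKS,
 *	};
 *	register_shrinker(&my_shrinker);
 */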

int vma_wants_writenotify(struct vm_area_struct *vma);

extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);

#ifdef __PAGETABLE_PUD_FOLDED
static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
						unsigned long address)
{
	return 0;
}
#else
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#ifdef __PAGETABLE_PMD_FOLDED
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}
#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
#endif

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);

/*
 * The following ifdef is needed to get the 4level-fixup.h header to work.
 * Remove it when 4level-fixup.h has been removed.
 */
#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
		NULL: pud_offset(pgd, address);
}

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
		NULL: pmd_offset(pud, address);
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */

#if USE_SPLIT_PTLOCKS
/*
 * With split ptlocks, each pagetable page carries its own spinlock in
 * its struct page (page->ptl), so different parts of the same page
 * table can be worked on concurrently.
 */
#define __pte_lockptr(page)	&((page)->ptl)
#define pte_lock_init(_page)	do {					\
	spin_lock_init(__pte_lockptr(_page));				\
} while (0)
#define pte_lock_deinit(page)	((page)->mapping = NULL)
#define pte_lockptr(mm, pmd)	({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
#else	/* !USE_SPLIT_PTLOCKS */
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
#define pte_lock_init(page)	do {} while (0)
#define pte_lock_deinit(page)	do {} while (0)
#define pte_lockptr(mm, pmd)	({(void)(pmd); &(mm)->page_table_lock;})
#endif /* USE_SPLIT_PTLOCKS */

static inline void pgtable_page_ctor(struct page *page)
{
	pte_lock_init(page);
	inc_zone_page_state(page, NR_PAGETABLE);
}

static inline void pgtable_page_dtor(struct page *page)
{
	pte_lock_deinit(page);
	dec_zone_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)

#define pte_alloc_map(mm, pmd, address)			\
	((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
		NULL: pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
	((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
		NULL: pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)			\
	((unlikely(!pmd_present(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
		NULL: pte_offset_kernel(pmd, address))
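/*
 * Sketch (illustrative): the canonical pattern for touching one pte with
 * the correct lock held (split ptlock or mm->page_table_lock, whichever
 * is configured).
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	if (pte_present(*pte))
 *		... inspect or modify the pte ...
 *	pte_unmap_unlock(pte, ptl);
 */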

extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, unsigned long * zones_size,
		unsigned long zone_start_pfn, unsigned long *zholes_size);
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
/*
 * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise
 * its own node/zone layout by registering each active physical memory
 * range with add_active_range() and then calling free_area_init_nodes()
 * with the maximum PFN of each zone.  Zone boundaries are derived from
 * the registered ranges; holes are handled by absent_pages_in_range()
 * and the helpers below.
 */
extern void free_area_init_nodes(unsigned long *max_zone_pfn);
extern void add_active_range(unsigned int nid, unsigned long start_pfn,
					unsigned long end_pfn);
extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
					unsigned long end_pfn);
extern void remove_all_active_ranges(void);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
						unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
						unsigned long max_low_pfn);
typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
extern void sparse_memory_present_with_active_regions(int nid);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#if !defined(CONFIG_ARCH_POPULATES_NODE_MAP) && \
    !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
static inline int __early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
/* there is a per-arch stub defined in mm/page_alloc.c */
extern int __meminit __early_pfn_to_nid(unsigned long pfn);
#endif
#endif

extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long,
				unsigned long, enum memmap_context);
extern void setup_per_zone_wmarks(void);
extern void calculate_zone_inactive_ratio(struct zone *zone);
extern void mem_init(void);
extern void __init mmap_init(void);
extern void show_mem(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
extern int after_bootmem;

#ifdef CONFIG_NUMA
extern void setup_per_cpu_pageset(void);
#else
static inline void setup_per_cpu_pageset(void) {}
#endif

extern void zone_pcp_update(struct zone *zone);

/* nommu.c */
extern atomic_long_t mmap_pages_allocated;

/* prio_tree.c */
void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *);
void vma_prio_tree_remove(struct vm_area_struct *, struct prio_tree_root *);
struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma,
	struct prio_tree_iter *iter);

#define vma_prio_tree_foreach(vma, iter, root, begin, end)	\
	for (prio_tree_iter_init(iter, root, begin, end), vma = NULL;	\
		(vma = vma_prio_tree_next(vma, iter)); )
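/*
 * Sketch (illustrative): the reverse-map walk over every vma that maps a
 * given range of a file, as rmap and truncation do (i_mmap_lock held).
 *
 *	struct vm_area_struct *vma;
 *	struct prio_tree_iter iter;
 *
 *	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff)
 *		... 'vma' maps the file page at 'pgoff' ...
 */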

static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
					struct list_head *list)
{
	vma->shared.vm_set.parent = NULL;
	list_add_tail(&vma->shared.vm_set.list, list);
}

/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern void vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
extern struct vm_area_struct *vma_merge(struct mm_struct *,
	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
	struct mempolicy *);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int split_vma(struct mm_struct *,
	struct vm_area_struct *, unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
	struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
	unsigned long addr, unsigned long len, pgoff_t pgoff);
extern void exit_mmap(struct mm_struct *);

extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);

#ifdef CONFIG_PROC_FS
/* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */
extern void added_exe_file_vma(struct mm_struct *mm);
extern void removed_exe_file_vma(struct mm_struct *mm);
#else
static inline void added_exe_file_vma(struct mm_struct *mm)
{}

static inline void removed_exe_file_vma(struct mm_struct *mm)
{}
#endif /* CONFIG_PROC_FS */

extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
extern int install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags, struct page **pages);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff);
extern unsigned long mmap_region(struct file *file, unsigned long addr,
	unsigned long len, unsigned long flags,
	unsigned int vm_flags, unsigned long pgoff);

static inline unsigned long do_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	unsigned long ret = -EINVAL;
	if ((offset + PAGE_ALIGN(len)) < offset)
		goto out;
	if (!(offset & ~PAGE_MASK))
		ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
out:
	return ret;
}

extern int do_munmap(struct mm_struct *, unsigned long, size_t);

extern unsigned long do_brk(unsigned long, unsigned long);

/* truncate.c */
extern unsigned long page_unuse(struct page *);
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
				       loff_t lstart, loff_t lend);

/* generic vm_area_ops exported for stackable file systems */
extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);

/* mm/page-writeback.c */
int write_one_page(struct page *page, int wait);
void task_dirty_inc(struct task_struct *tsk);

/* readahead.c */
#define VM_MAX_READAHEAD	128	/* kbytes */
#define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */

int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read);

void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra,
			       struct file *filp,
			       pgoff_t offset,
			       unsigned long size);

void page_cache_async_readahead(struct address_space *mapping,
				struct file_ra_state *ra,
				struct file *filp,
				struct page *pg,
				pgoff_t offset,
				unsigned long size);

unsigned long max_sane_readahead(unsigned long nr);
unsigned long ra_submit(struct file_ra_state *ra,
			struct address_space *mapping,
			struct file *filp);

/* Do stack extension */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
#ifdef CONFIG_IA64
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#endif
extern int expand_stack_downwards(struct vm_area_struct *vma,
				  unsigned long address);

/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
					     struct vm_area_struct **pprev);

/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none.  Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma = find_vma(mm,start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}
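/*
 * Sketch (illustrative): looking up the vma that covers a user address
 * (mmap_sem held for reading).  find_vma() returns the first vma with
 * vm_end > addr, so the caller still has to check vm_start.
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, addr);
 *	if (vma && vma->vm_start <= addr)
 *		npages = vma_pages(vma);
 *	up_read(&mm->mmap_sem);
 */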

pgprot_t vm_get_page_prot(unsigned long vm_flags);
struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
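/*
 * Illustrative sketch (hypothetical driver): mapping device memory to
 * userspace from a file_operations ->mmap handler.  MY_DEVICE_PHYS is a
 * made-up physical base address.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long pfn = MY_DEVICE_PHYS >> PAGE_SHIFT;
 *
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return remap_pfn_range(vma, vma->vm_start, pfn,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 */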

struct page *follow_page(struct vm_area_struct *, unsigned long address,
			unsigned int foll_flags);
#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_TOUCH	0x02	/* mark page accessed */
#define FOLL_GET	0x04	/* do get_page on page */
#define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
#define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */

typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
			void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
			       unsigned long size, pte_fn_t fn, void *data);
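/*
 * Sketch (illustrative, hypothetical callback): apply_to_page_range()
 * allocates any missing page tables in the range and invokes 'fn' on
 * every pte, passing 'data' through.
 *
 *	static int clear_one(pte_t *pte, pgtable_t token,
 *			     unsigned long addr, void *data)
 *	{
 *		pte_clear((struct mm_struct *)data, addr, pte);
 *		return 0;		- non-zero aborts the walk
 *	}
 *
 *	apply_to_page_range(mm, start, size, clear_one, mm);
 */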

#ifdef CONFIG_PROC_FS
void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
#else
static inline void vm_stat_account(struct mm_struct *mm,
			unsigned long flags, struct file *file, long pages)
{
}
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_DEBUG_PAGEALLOC
extern int debug_pagealloc_enabled;

extern void kernel_map_pages(struct page *page, int numpages, int enable);

static inline void enable_debug_pagealloc(void)
{
	debug_pagealloc_enabled = 1;
}
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif /* CONFIG_HIBERNATION */
#else
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
static inline void enable_debug_pagealloc(void)
{
}
#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
#endif /* CONFIG_HIBERNATION */
#endif

extern struct vm_area_struct *get_gate_vma(struct task_struct *tsk);
#ifdef __HAVE_ARCH_GATE_AREA
int in_gate_area_no_task(unsigned long addr);
int in_gate_area(struct task_struct *task, unsigned long addr);
#else
int in_gate_area_no_task(unsigned long addr);
#define in_gate_area(task, addr) ({(void)task; in_gate_area_no_task(addr);})
#endif	/* __HAVE_ARCH_GATE_AREA */

int drop_caches_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
			unsigned long lru_pages);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char * arch_vma_name(struct vm_area_struct *vma);
void print_vma_addr(char *prefix, unsigned long rip);

struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(struct page *start_page,
						unsigned long pages, int node);
int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
void vmemmap_populate_print_last(void);

extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
				 size_t size);
extern void refund_locked_memory(struct mm_struct *mm, size_t size);

extern void memory_failure(unsigned long pfn, int trapno);
extern int __memory_failure(unsigned long pfn, int trapno, int ref);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern atomic_long_t mce_bad_pages;

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */