#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;

#ifndef CONFIG_NEED_MULTIPLE_NODES
extern unsigned long max_mapnr;

static inline void set_max_mapnr(unsigned long limit)
{
	max_mapnr = limit;
}
#else
static inline void set_max_mapnr(unsigned long limit) { }
#endif

extern unsigned long totalram_pages;
extern void * high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#ifndef __pa_symbol
#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

/*
 * To prevent common memory management code establishing
 * a zero page mapping on a read fault.
 * This macro should be defined within <asm/pgtable.h>.
 * s390 does this to prevent multiplexing of hardware bits
 * related to the physical page in case of virtualization.
 */
#ifndef mm_forbids_zeropage
#define mm_forbids_zeropage(X)	(0)
#endif

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;

extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
				    size_t *, loff_t *);
extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
				    size_t *, loff_t *);

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)addr, PAGE_SIZE)
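
/*
 * Worked example (illustrative note, not from the original header): with a
 * 4096-byte PAGE_SIZE, PAGE_ALIGN(0x1234) rounds up to 0x2000 and
 * PAGE_ALIGN(0x2000) stays 0x2000; PAGE_ALIGNED(0x2000) is true while
 * PAGE_ALIGNED(0x1234) is false.
 */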

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

extern struct kmem_cache *vm_area_cachep;

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 */
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */

#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_ARCH_2	0x02000000
#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
#else
# define VM_SOFTDIRTY	0
#endif

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_METAG)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif

#if defined(CONFIG_X86)
/* MPX specific bounds table or bounds directory */
# define VM_MPX		VM_ARCH_2
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS	(VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#else
#define VM_STACK_FLAGS	(VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif

/*
 * Special vmas that are non-mergable, non-mlock()able.
 * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

/* This mask defines which mm->def_flags a process can inherit its parent */
#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection map..
 */
extern pgprot_t protection_map[16];

#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
#define FAULT_FLAG_MKWRITE	0x02	/* Fault was mkwrite of existing pte */
#define FAULT_FLAG_ALLOW_RETRY	0x04	/* Retry fault if blocking */
#define FAULT_FLAG_RETRY_NOWAIT	0x08	/* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE	0x10	/* The fault task is in SIGKILL killable region */
#define FAULT_FLAG_TRIED	0x20	/* Second try */
#define FAULT_FLAG_USER		0x40	/* The fault originated in userspace */

/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * pgoff should be used in favour of virtual_address, if possible.
 */
struct vm_fault {
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	void __user *virtual_address;	/* Faulting virtual address */

	struct page *cow_page;		/* Handler may choose to COW */
	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
	/* for ->map_pages() only */
	pgoff_t max_pgoff;		/* map pages for offset from pgoff till
					 * max_pgoff inclusive */
	pte_t *pte;			/* pte entry associated with ->pgoff */
};

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
	void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	int (*pfn_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs that can switch between memory and hardware
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
#endif
	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr.  This is useful if the default behavior
	 * (using pte_page()) would not find the correct page.
	 */
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};

struct mmu_gather;
struct inode;

#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

/* It's valid only if the page is on the free path or a free list */
static inline void set_freepage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}

/* It's valid only if the page is on the free path or a free list */
static inline int get_freepage_migratetype(struct page *page)
{
	return page->index;
}
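
/*
 * Note (not from the original header): while a page sits on a buddy
 * free list its ->index field is not otherwise used, so the allocator
 * borrows it to remember the migratetype the page was freed under,
 * letting the page go back onto the matching free list later.
 */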

/*
 * FIXME: take this include out, include page-flags.h in
 * files which need it (119 of them)
 */
#include <linux/page-flags.h>
#include <linux/huge_mm.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page);
	return atomic_dec_and_test(&page->_count);
}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 * This can be called when MMU is off so it must not access
 * any of the virtual mappings.
 */
static inline int get_page_unless_zero(struct page *page)
{
	return atomic_inc_not_zero(&page->_count);
}

/*
 * Try to drop a ref unless the page has a refcount of one, return false if
 * that is the case.
 * This is to make sure that the refcount won't become zero after this drop.
 * This can be called when MMU is off so it must not access
 * any of the virtual mappings.
 */
static inline int put_page_unless_one(struct page *page)
{
	return atomic_add_unless(&page->_count, -1, 1);
}
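
/*
 * Illustrative usage (not from the original header): speculative
 * lookups take a reference only if the page is still live, e.g.:
 *
 *	if (get_page_unless_zero(page)) {
 *		...page cannot be freed from under us...
 *		put_page(page);
 *	}
 */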

extern int page_is_ram(unsigned long pfn);
extern int region_is_ram(resource_size_t phys_addr, unsigned long size);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
static inline int is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return 0;
#endif
}
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

extern void kvfree(const void *addr);

static inline void compound_lock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON_PAGE(PageSlab(page), page);
	bit_spin_lock(PG_compound_lock, &page->flags);
#endif
}

static inline void compound_unlock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON_PAGE(PageSlab(page), page);
	bit_spin_unlock(PG_compound_lock, &page->flags);
#endif
}

static inline unsigned long compound_lock_irqsave(struct page *page)
{
	unsigned long uninitialized_var(flags);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	local_irq_save(flags);
	compound_lock(page);
#endif
	return flags;
}

static inline void compound_unlock_irqrestore(struct page *page,
					      unsigned long flags)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	compound_unlock(page);
	local_irq_restore(flags);
#endif
}

static inline struct page *compound_head_by_tail(struct page *tail)
{
	struct page *head = tail->first_page;

	/*
	 * page->first_page may be a dangling pointer to an old
	 * compound page, so recheck that it is still a tail
	 * page before returning.
	 */
	smp_rmb();
	if (likely(PageTail(tail)))
		return head;
	return tail;
}

/*
 * Could be called by various subsystems.  The caller is expected to
 * hold a reference on the page, or otherwise prevent the compound page
 * from being split or freed while this runs.
 */
static inline struct page *compound_head(struct page *page)
{
	if (unlikely(PageTail(page)))
		return compound_head_by_tail(page);
	return page;
}

/*
 * If we access compound page synchronously such as access to
 * allocated page, there is no need to handle tail flag race, so we can
 * check tail flag directly without any synchronization primitive.
 */
static inline struct page *compound_head_fast(struct page *page)
{
	if (unlikely(PageTail(page)))
		return page->first_page;
	return page;
}
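
/*
 * Note (not from the original header): compound_head() above tolerates
 * a racing split by rechecking PageTail() after the smp_rmb();
 * compound_head_fast() skips that barrier and is only safe when the
 * caller already knows the page cannot be split or freed concurrently,
 * e.g. a page backing an allocated kernel buffer.
 */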

/*
 * The atomic page->_mapcount, starts from -1: so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */
static inline void page_mapcount_reset(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

static inline int page_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(PageSlab(page), page);
	return atomic_read(&page->_mapcount) + 1;
}

static inline int page_count(struct page *page)
{
	return atomic_read(&compound_head(page)->_count);
}

static inline bool __compound_tail_refcounted(struct page *page)
{
	return !PageSlab(page) && !PageHeadHuge(page);
}

/*
 * This takes a head page as parameter and tells if the
 * tail page reference counting can be skipped.
 *
 * For this to be safe, PageSlab and PageHeadHuge must remain true on
 * any given page where they return true here, until all tail pins
 * have been released.
 */
static inline bool compound_tail_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	return __compound_tail_refcounted(page);
}

static inline void get_huge_page_tail(struct page *page)
{
	/*
	 * __split_huge_page_refcount() cannot run from under us.
	 */
	VM_BUG_ON_PAGE(!PageTail(page), page);
	VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
	VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
	if (compound_tail_refcounted(page->first_page))
		atomic_inc(&page->_mapcount);
}

extern bool __get_page_tail(struct page *page);

static inline void get_page(struct page *page)
{
	if (unlikely(PageTail(page)))
		if (likely(__get_page_tail(page)))
			return;
	/*
	 * Getting a normal page or the head of a compound page
	 * requires to already have an elevated page->_count.
	 */
	VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
	atomic_inc(&page->_count);
}
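
/*
 * Note (not from the original header): get_page() requires that the
 * caller already holds a reference (or otherwise pins the page); the
 * VM_BUG_ON above fires on a zero refcount.  To take a reference
 * speculatively, use get_page_unless_zero() instead.
 */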

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);

	/*
	 * We don't need to worry about synchronization of tail flag
	 * when we call virt_to_head_page() since it is only called for
	 * already allocated page and this page won't be freed until
	 * this virt_to_head_page() is finished. So use _fast variant.
	 */
	return compound_head_fast(page);
}

/*
 * Setup the page count before being freed into the page allocator for
 * the first time (boot or memory hotplug)
 */
static inline void init_page_count(struct page *page)
{
	atomic_set(&page->_count, 1);
}

void put_page(struct page *page);
void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);
int split_free_page(struct page *page);

/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a PG_compound page.
 */
static inline void set_compound_page_dtor(struct page *page,
						compound_page_dtor *dtor)
{
	page[1].compound_dtor = dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	return page[1].compound_dtor;
}

static inline int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return page[1].compound_order;
}

static inline void set_compound_order(struct page *page, unsigned long order)
{
	page[1].compound_order = order;
}
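
/*
 * Note (not from the original header): the destructor and order of a
 * compound page are stored in the first tail page (page[1]), since the
 * head page's own fields are in normal use; hence these accessors are
 * only meaningful on compound heads.
 */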

#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case, do always want
 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}

void do_set_pte(struct vm_area_struct *vma, unsigned long address,
		struct page *page, pte_t *pte, bool write, bool anon);
#endif

/*
 * Multiple processes may "see" the same page.  E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.  For the non-reserved
 * pages, page_count(page) denotes a reference count: 0 means the page
 * is free, while a positive count means it is in use by one or more
 * users (pagecache, private data, page tables, get_user_pages()...).
 *
 * On top of the ordinary flag bits, page->flags also multiplexes
 * several bit fields.  Depending on the memory model and config the
 * layout, from the most significant bits down, is:
 *
 *	| [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS |
 *
 * The section field exists only with classic sparsemem
 * (SECTION_IN_PAGE_FLAGS); the node field is omitted when
 * NODE_NOT_IN_PAGE_FLAGS, in which case the node is derived from the
 * section; last_cpupid is used by NUMA balancing and may instead live
 * in its own struct page field (LAST_CPUPID_NOT_IN_PAGE_FLAGS).  The
 * *_PGOFF values below locate each field, *_PGSHIFT give the shift to
 * extract it, and *_MASK the mask; a width of 0 compiles a field away.
 */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF) ? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF) ? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}
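
/*
 * Worked example (illustrative, not from the original header): on a
 * 64-bit flat-memory, non-NUMA build, SECTIONS_WIDTH and NODES_WIDTH
 * are both 0, so with ZONES_WIDTH == 2 we get ZONES_PGOFF == 62 and
 * page_zonenum() simply reads the top two bits of page->flags.
 */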

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif

/*
 * The identification function is mainly used by the buddy allocator for
 * detecting if two pages could be buddies. We are not really identifying
 * the zone since we could be using the section number id if we do not
 * have node id available in page flags.
 * We only guarantee that it will return the same value for two combinable
 * pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
	return zone->node;
#else
	return 0;
#endif
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

static inline int cpupid_to_pid(int cpupid)
{
	return cpupid & LAST__PID_MASK;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}

static inline int cpupid_to_nid(int cpupid)
{
	return cpu_to_node(cpupid_to_cpu(cpupid));
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}

static inline bool cpupid_cpu_unset(int cpupid)
{
	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
}

static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
{
	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
}

#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}

static inline int page_cpupid_last(struct page *page)
{
	return page->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int page_cpupid_last(struct page *page)
{
	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}

extern int page_cpupid_xchg_last(struct page *page, int cpupid);

static inline void page_cpupid_reset_last(struct page *page)
{
	int cpupid = (1 << LAST_CPUPID_SHIFT) - 1;

	page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
	page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
#else /* !CONFIG_NUMA_BALANCING */
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return page_to_nid(page);
}

static inline int page_cpupid_last(struct page *page)
{
	return page_to_nid(page);
}

static inline int cpupid_to_nid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_pid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return -1;
}

static inline int cpu_pid_to_cpupid(int nid, int pid)
{
	return -1;
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return 1;
}

static inline void page_cpupid_reset_last(struct page *page)
{
}

static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#ifdef SECTION_IN_PAGE_FLAGS
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(const struct page *page)
{
	return __va(PFN_PHYS(page_to_pfn(page)));
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
static inline void *page_address(const struct page *page)
{
	return page->virtual;
}
static inline void set_page_address(struct page *page, void *address)
{
	page->virtual = address;
}
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

extern void *page_rmapping(struct page *page);
extern struct anon_vma *page_anon_vma(struct page *page);
extern struct address_space *page_mapping(struct page *page);

extern struct address_space *__page_file_mapping(struct page *);

static inline
struct address_space *page_file_mapping(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_mapping(page);

	return page->mapping;
}

/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use ->private
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return page_private(page);
	return page->index;
}

extern pgoff_t __page_file_index(struct page *page);

/*
 * Return the file index of the page. Regular pagecache pages use ->index
 * whereas swapcache pages use swp_offset(->private)
 */
static inline pgoff_t page_file_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_index(page);

	return page->index;
}

/*
 * Return true if this page is mapped into pagetables.
 */
static inline int page_mapped(struct page *page)
{
	return atomic_read(&(page)->_mapcount) >= 0;
}

/*
 * Different kinds of faults, as returned by handle_mm_fault().
 * Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 */

#define VM_FAULT_MINOR	0 /* For backwards compat. Remove me quickly. */

#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
#define VM_FAULT_HWPOISON_LARGE 0x0020	/* Hit poisoned large page. Index encoded in upper bits */
#define VM_FAULT_SIGSEGV 0x0040

#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
#define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */
#define VM_FAULT_FALLBACK 0x0800	/* huge page fault failed, fall back to small */

#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */

#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
			 VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
			 VM_FAULT_FALLBACK)

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)

/*
 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
 */
extern void pagefault_out_of_memory(void);

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

/*
 * Flags passed to show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

extern void show_free_areas(unsigned int flags);
extern bool skip_free_areas_node(unsigned int flags, int nid);

int shmem_zero_setup(struct vm_area_struct *);
#ifdef CONFIG_SHMEM
bool shmem_mapping(struct address_space *mapping);
#else
static inline bool shmem_mapping(struct address_space *mapping)
{
	return false;
}
#endif

extern int can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct address_space *check_mapping;	/* Check page->mapping if set */
	pgoff_t	first_index;			/* Lowest page->index to unmap */
	pgoff_t last_index;			/* Highest page->index to unmap */
};

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
		pte_t pte);

int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long start, unsigned long end);

/**
 * mm_walk - callbacks for walk_page_range
 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
 *	       this handler is required to be able to handle
 *	       pmd_trans_huge() pmds.  They may simply choose to
 *	       split_huge_page() instead of handling it explicitly.
 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
 * @pte_hole: if set, called for each hole at all levels
 * @hugetlb_entry: if set, called for each hugetlb entry
 * @test_walk: caller specific callback function to determine whether
 *             we walk over the current vma or not. A positive returned
 *             value means "do page table walk over the current vma,"
 *             and a negative one means "abort current page table walk
 *             right now." 0 means "skip the current vma."
 * @mm:        mm_struct representing the target process of page table walk
 * @vma:       vma currently walked (NULL if walking outside vmas)
 * @private:   private data for callbacks' usage
 *
 * (see the comment on walk_page_range() for more details)
 */
struct mm_walk {
	int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_entry)(pte_t *pte, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_hole)(unsigned long addr, unsigned long next,
			struct mm_walk *walk);
	int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
			     unsigned long addr, unsigned long next,
			     struct mm_walk *walk);
	int (*test_walk)(unsigned long addr, unsigned long next,
			struct mm_walk *walk);
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	void *private;
};

int walk_page_range(unsigned long addr, unsigned long end,
		struct mm_walk *walk);
int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write);

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);

#ifdef CONFIG_MMU
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long address, unsigned int fault_flags);
#else
static inline int handle_mm_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct task_struct *tsk,
		struct mm_struct *mm, unsigned long address,
		unsigned int fault_flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return -EFAULT;
}
#endif

extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
		void *buf, int len, int write);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, int write);

long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		      unsigned long start, unsigned long nr_pages,
		      unsigned int foll_flags, struct page **pages,
		      struct vm_area_struct **vmas, int *nonblocking);
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		    unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages,
		    struct vm_area_struct **vmas);
long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
		    unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages,
		    int *locked);
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
			       unsigned long start, unsigned long nr_pages,
			       int write, int force, struct page **pages,
			       unsigned int gup_flags);
long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
		    unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages);
struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
			struct page **pages);
int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);
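
/*
 * Illustrative sketch (not part of the original header): pinning user
 * pages with the 8-argument get_user_pages() declared above.  Every
 * name except the mm/GUP calls is hypothetical.
 *
 *	struct page *pages[16];
 *	long i, got;
 *
 *	down_read(&current->mm->mmap_sem);
 *	got = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
 *			     16, 1, 0, pages, NULL);
 *	up_read(&current->mm->mmap_sem);
 *
 *	for (i = 0; i < got; i++) {
 *		...access pages[i]...
 *		page_cache_release(pages[i]);
 *	}
 */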

extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned int offset,
			      unsigned int length);

int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_cleaned(struct page *page, struct address_space *mapping);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);

int get_cmdline(struct task_struct *task, char *buffer, int buflen);

/* Is the vma a continuation of the stack vma above it? */
static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
}

static inline int stack_guard_page_start(struct vm_area_struct *vma,
					     unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSDOWN) &&
		(vma->vm_start == addr) &&
		!vma_growsdown(vma->vm_prev, addr);
}

/* Is the vma a continuation of the stack vma below it? */
static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
}

static inline int stack_guard_page_end(struct vm_area_struct *vma,
					   unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSUP) &&
		(vma->vm_end == addr) &&
		!vma_growsup(vma->vm_next, addr);
}

extern struct task_struct *task_of_stack(struct task_struct *task,
				struct vm_area_struct *vma, bool in_group);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks);
extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end, pgprot_t newprot,
			      int dirty_accountable, int prot_numa);
extern int mprotect_fixup(struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

/*
 * doesn't attempt to fault and will return short.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages);

/*
 * per-process(per-mm_struct) statistics.
 */
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = atomic_long_read(&mm->rss_stat.count[member]);

#ifdef SPLIT_RSS_COUNTING
	/*
	 * counter is updated in asynchronous manner and may go to minus.
	 * But it's never be expected number for users.
	 */
	if (val < 0)
		val = 0;
#endif
	return (unsigned long)val;
}

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	atomic_long_add(value, &mm->rss_stat.count[member]);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_inc(&mm->rss_stat.count[member]);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_dec(&mm->rss_stat.count[member]);
}

static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES);
}

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if ((mm)->hiwater_rss < _rss)
		(mm)->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
{
	mm->hiwater_rss = get_mm_rss(mm);
}

static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

	if (*maxrss < hiwater_rss)
		*maxrss = hiwater_rss;
}

#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct mm_struct *mm)
{
}
#endif

int vma_wants_writenotify(struct vm_area_struct *vma);

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			       spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
				    spinlock_t **ptl)
{
	pte_t *ptep;
	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
	return ptep;
}
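
/*
 * Illustrative usage (not from the original header): get_locked_pte()
 * walks (and, via __get_locked_pte(), allocates) the page-table levels
 * down to the pte and returns it mapped with its lock held; release
 * both with pte_unmap_unlock():
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = get_locked_pte(mm, addr, &ptl);
 *
 *	if (pte) {
 *		...inspect or install *pte...
 *		pte_unmap_unlock(pte, ptl);
 *	}
 */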

#ifdef __PAGETABLE_PUD_FOLDED
static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
						unsigned long address)
{
	return 0;
}
#else
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}

static inline void mm_nr_pmds_init(struct mm_struct *mm) {}

static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
{
	return 0;
}

static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}

#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);

static inline void mm_nr_pmds_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->nr_pmds, 0);
}

static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
{
	return atomic_long_read(&mm->nr_pmds);
}

static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
	atomic_long_inc(&mm->nr_pmds);
}

static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
	atomic_long_dec(&mm->nr_pmds);
}
#endif

int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);

/*
 * The following ifdef needed to get the 4level-fixup.h header to work.
 * Remove it when 4level-fixup.h has been removed.
 */
#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address)) ?
		NULL : pud_offset(pgd, address);
}

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address)) ?
		NULL : pmd_offset(pud, address);
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */

#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
extern bool ptlock_alloc(struct page *page);
extern void ptlock_free(struct page *page);

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return page->ptl;
}
#else /* ALLOC_SPLIT_PTLOCKS */
static inline void ptlock_cache_init(void)
{
}

static inline bool ptlock_alloc(struct page *page)
{
	return true;
}

static inline void ptlock_free(struct page *page)
{
}

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return &page->ptl;
}
#endif /* ALLOC_SPLIT_PTLOCKS */

static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_page(*pmd));
}

static inline bool ptlock_init(struct page *page)
{
	/*
	 * prep_new_page() initialize page->private (and therefore page->ptl)
	 * with 0. Make sure nobody took it in use in between.
	 *
	 * It can happen if arch try to use slab for page table allocation:
	 * slab code uses page->slab_cache, which share storage with page->ptl.
	 */
	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
	if (!ptlock_alloc(page))
		return false;
	spin_lock_init(ptlock_ptr(page));
	return true;
}

/* Reset page->mapping so free_pages_check won't complain. */
static inline void pte_lock_deinit(struct page *page)
{
	page->mapping = NULL;
	ptlock_free(page);
}

#else	/* !USE_SPLIT_PTE_PTLOCKS */
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct page *page) { return true; }
static inline void pte_lock_deinit(struct page *page) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */

static inline void pgtable_init(void)
{
	ptlock_cache_init();
	pgtable_cache_init();
}

static inline bool pgtable_page_ctor(struct page *page)
{
	inc_zone_page_state(page, NR_PAGETABLE);
	return ptlock_init(page);
}

static inline void pgtable_page_dtor(struct page *page)
{
	pte_lock_deinit(page);
	dec_zone_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)
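
/*
 * Illustrative usage (not from the original header): the common pattern
 * for examining a pte under the (possibly split) pte lock:
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *
 *	if (pte_present(*pte))
 *		...the pte cannot change under us here...
 *	pte_unmap_unlock(pte, ptl);
 */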

#define pte_alloc_map(mm, vma, pmd, address)				\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma,		\
							pmd, address))?	\
	 NULL: pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL,		\
							pmd, address))?	\
		NULL: pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)			\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
		NULL: pte_offset_kernel(pmd, address))

#if USE_SPLIT_PMD_PTLOCKS

static struct page *pmd_to_page(pmd_t *pmd)
{
	/* mask the pmd pointer down to the start of the page it lives in */
	unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	return virt_to_page((void *)((unsigned long) pmd & mask));
}

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_to_page(pmd));
}

static inline bool pgtable_pmd_page_ctor(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	page->pmd_huge_pte = NULL;
#endif
	return ptlock_init(page);
}

static inline void pgtable_pmd_page_dtor(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
#endif
	ptlock_free(page);
}

#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)

#else

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}

static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
static inline void pgtable_pmd_page_dtor(struct page *page) {}

#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)

#endif

static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl = pmd_lockptr(mm, pmd);
	spin_lock(ptl);
	return ptl;
}
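
/*
 * Note (not from the original header): pmd_lock() is the pmd-level
 * counterpart of the pte locking above; with USE_SPLIT_PMD_PTLOCKS it
 * serializes on the lock embedded in the pmd page table page, and
 * callers (e.g. transparent huge page code) take it before modifying a
 * huge pmd.
 */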

extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, unsigned long * zones_size,
		unsigned long zone_start_pfn, unsigned long *zholes_size);
extern void free_initmem(void);

/*
 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
 * into the buddy system. The freed pages will be poisoned with pattern
 * "poison" if it's within range [0, UCHAR_MAX].
 * Return pages freed into the buddy system.
 */
extern unsigned long free_reserved_area(void *start, void *end,
					int poison, char *s);

#ifdef CONFIG_HIGHMEM
/*
 * Free a highmem page into the buddy system, adjusting totalhigh_pages
 * and totalram_pages.
 */
extern void free_highmem_page(struct page *page);
#endif

extern void adjust_managed_page_count(struct page *page, long count);
extern void mem_init_print_info(const char *str);

/* Free the reserved page into the buddy system, so it gets managed. */
static inline void __free_reserved_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}

static inline void free_reserved_page(struct page *page)
{
	__free_reserved_page(page);
	adjust_managed_page_count(page, 1);
}

static inline void mark_page_reserved(struct page *page)
{
	SetPageReserved(page);
	adjust_managed_page_count(page, -1);
}

/*
 * Default method to free all the __init memory into the buddy system.
 * The freed pages will be poisoned with pattern "poison" if it's within
 * range [0, UCHAR_MAX].
 * Return pages freed into the buddy system.
 */
static inline unsigned long free_initmem_default(int poison)
{
	extern char __init_begin[], __init_end[];

	return free_reserved_area(&__init_begin, &__init_end,
				  poison, "unused kernel");
}

static inline unsigned long get_num_physpages(void)
{
	int nid;
	unsigned long phys_pages = 0;

	for_each_online_node(nid)
		phys_pages += node_present_pages(nid);

	return phys_pages;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise
 * its zones, allocate the backing mem_map and account for memory holes in
 * an architecture-independent manner: it registers its physical memory
 * ranges per node with memblock and then calls free_area_init_nodes()
 * with the maximum PFN of each zone, instead of calling
 * free_area_init_node() for each node directly.  Zone boundaries are
 * handled globally, node boundaries are derived from the registered
 * ranges, and memory holes within zones are accounted automatically
 * (see the helpers declared below).
 */
extern void free_area_init_nodes(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
						unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
						unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
						unsigned long max_low_pfn);
extern void sparse_memory_present_with_active_regions(int nid);

#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
    !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
static inline int __early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
/* there is a per-arch backend function. */
extern int __meminit __early_pfn_to_nid(unsigned long pfn);
#endif

extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long,
				unsigned long, enum memmap_context);
extern void setup_per_zone_wmarks(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
extern void show_mem(unsigned int flags);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);

extern __printf(3, 4)
void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);

extern void setup_per_cpu_pageset(void);

extern void zone_pcp_update(struct zone *zone);
extern void zone_pcp_reset(struct zone *zone);

/* page_alloc.c */
extern int min_free_kbytes;

/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);

/* interval_tree.c */
void vma_interval_tree_insert(struct vm_area_struct *node,
			      struct rb_root *root);
void vma_interval_tree_insert_after(struct vm_area_struct *node,
				    struct vm_area_struct *prev,
				    struct rb_root *root);
void vma_interval_tree_remove(struct vm_area_struct *node,
			      struct rb_root *root);
struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root,
				unsigned long start, unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
				unsigned long start, unsigned long last);

#define vma_interval_tree_foreach(vma, root, start, last)		\
	for (vma = vma_interval_tree_iter_first(root, start, last);	\
	     vma; vma = vma_interval_tree_iter_next(vma, start, last))

void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
				   struct rb_root *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
				   struct rb_root *root);
struct anon_vma_chain *anon_vma_interval_tree_iter_first(
	struct rb_root *root, unsigned long start, unsigned long last);
struct anon_vma_chain *anon_vma_interval_tree_iter_next(
	struct anon_vma_chain *node, unsigned long start, unsigned long last);
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
#endif

#define anon_vma_interval_tree_foreach(avc, root, start, last)		 \
	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))

/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
extern struct vm_area_struct *vma_merge(struct mm_struct *,
	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
	struct mempolicy *);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int split_vma(struct mm_struct *,
	struct vm_area_struct *, unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
	struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);

static inline int check_data_rlimit(unsigned long rlim,
				    unsigned long new,
				    unsigned long start,
				    unsigned long end_data,
				    unsigned long start_data)
{
	if (rlim < RLIM_INFINITY) {
		if (((new - start) + (end_data - start_data)) > rlim)
			return -ENOSPC;
	}

	return 0;
}

extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);

extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);

extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags,
				   const struct vm_special_mapping *spec);
extern int install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags, struct page **pages);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long mmap_region(struct file *file, unsigned long addr,
	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	unsigned long pgoff, unsigned long *populate);
extern int do_munmap(struct mm_struct *, unsigned long, size_t);

#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
			 int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
{
	/* Ignore errors */
	(void) __mm_populate(addr, len, 1);
}
#else
static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif

/* These take the mm semaphore themselves */
extern unsigned long vm_brk(unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long vm_mmap(struct file *, unsigned long,
	unsigned long, unsigned long,
	unsigned long, unsigned long);

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
};

extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);

/*
 * Search for an unmapped address range.
 *
 * We are looking for a range that:
 * - does not intersect with any VMA;
 * - is contained within the [low_limit, high_limit) interval;
 * - is at least the desired size.
 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
 */
static inline unsigned long
vm_unmapped_area(struct vm_unmapped_area_info *info)
{
	if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
		return unmapped_area_topdown(info);
	else
		return unmapped_area(info);
}
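
/*
 * Illustrative sketch (not part of the original header): how an
 * architecture's get_unmapped_area() implementation typically fills in
 * the info block for a bottom-up search; the limit values here are
 * only an example.
 *
 *	struct vm_unmapped_area_info info;
 *
 *	info.flags = 0;
 *	info.length = len;
 *	info.low_limit = mm->mmap_base;
 *	info.high_limit = TASK_SIZE;
 *	info.align_mask = 0;
 *	info.align_offset = 0;
 *	return vm_unmapped_area(&info);
 */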

/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
				       loff_t lstart, loff_t lend);
extern void truncate_inode_pages_final(struct address_space *);

/* generic vm_area_ops exported for stackable file systems */
extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf);
extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);

/* mm/page-writeback.c */
int write_one_page(struct page *page, int wait);
void task_dirty_inc(struct task_struct *tsk);

/* readahead.c */
#define VM_MAX_READAHEAD	128	/* kbytes */
#define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */

int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read);

void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra,
			       struct file *filp,
			       pgoff_t offset,
			       unsigned long size);

void page_cache_async_readahead(struct address_space *mapping,
				struct file_ra_state *ra,
				struct file *filp,
				struct page *pg,
				pgoff_t offset,
				unsigned long size);

unsigned long max_sane_readahead(unsigned long nr);

/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);

/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
extern int expand_downwards(struct vm_area_struct *vma,
		unsigned long address);
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
  #define expand_upwards(vma, address) (0)
#endif

/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
					     struct vm_area_struct **pprev);

/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none.  Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma = find_vma(mm, start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}
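
/*
 * Worked example (illustrative, not from the original header): with
 * 4 KiB pages, a vma spanning [0x400000, 0x403000) has
 * vma_pages() == 3.
 */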

/* Look up the first VMA which exactly match the interval vm_start ... vm_end */
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
				unsigned long vm_start, unsigned long vm_end)
{
	struct vm_area_struct *vma = find_vma(mm, vm_start);

	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
		vma = NULL;

	return vma;
}

#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
#else
static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(0);
}
static inline void vma_set_page_prot(struct vm_area_struct *vma)
{
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
#endif

#ifdef CONFIG_NUMA_BALANCING
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
#endif

struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);

struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int foll_flags,
			      unsigned int *page_mask);

static inline struct page *follow_page(struct vm_area_struct *vma,
		unsigned long address, unsigned int foll_flags)
{
	unsigned int unused_page_mask;
	return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
}
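
/*
 * Illustrative sketch (not part of the original header): a typical
 * driver ->mmap() implementation built on remap_pfn_range(); the
 * "mydrv_phys_base" physical address is hypothetical.
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long pfn = mydrv_phys_base >> PAGE_SHIFT;
 *
 *		return remap_pfn_range(vma, vma->vm_start, pfn,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 */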

#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_TOUCH	0x02	/* mark page accessed */
#define FOLL_GET	0x04	/* do get_page on page */
#define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
#define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
#define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
				 * and return without waiting upon it */
#define FOLL_POPULATE	0x40	/* fault in page */
#define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
#define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
#define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
#define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
#define FOLL_TRIED	0x800	/* a retry, previous pass started an IO */

typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
			void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
			       unsigned long size, pte_fn_t fn, void *data);

#ifdef CONFIG_PROC_FS
void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
#else
static inline void vm_stat_account(struct mm_struct *mm,
			unsigned long flags, struct file *file, long pages)
{
	mm->total_vm += pages;
}
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
extern bool _debug_pagealloc_enabled;
extern void __kernel_map_pages(struct page *page, int numpages, int enable);

static inline bool debug_pagealloc_enabled(void)
{
	return _debug_pagealloc_enabled;
}

static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!debug_pagealloc_enabled())
		return;

	__kernel_map_pages(page, numpages, enable);
}
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif /* CONFIG_HIBERNATION */
#else
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
#endif /* CONFIG_HIBERNATION */
#endif

#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
extern int in_gate_area_no_mm(unsigned long addr);
extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
#else
static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}
static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}
#endif	/* __HAVE_ARCH_GATE_AREA */

#ifdef CONFIG_SYSCTL
extern int sysctl_drop_caches;
int drop_caches_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

void drop_slab(void);
void drop_slab_node(int nid);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char * arch_vma_name(struct vm_area_struct *vma);
void print_vma_addr(char *prefix, unsigned long rip);

void sparse_mem_maps_populate_node(struct page **map_map,
				   unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count,
				   int nodeid);

struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
void *vmemmap_alloc_block_buf(unsigned long size, int node);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
			       int node);
int vmemmap_populate(unsigned long start, unsigned long end, int node);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end);
#endif
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
				  unsigned long size);

enum mf_flags {
	MF_COUNT_INCREASED = 1 << 0,
	MF_ACTION_REQUIRED = 1 << 1,
	MF_MUST_KILL = 1 << 2,
	MF_SOFT_OFFLINE = 1 << 3,
};
extern int memory_failure(unsigned long pfn, int trapno, int flags);
extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t num_poisoned_pages;
extern int soft_offline_page(struct page *page, int flags);

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
			    unsigned long addr,
			    unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src,
				unsigned long addr, struct vm_area_struct *vma,
				unsigned int pages_per_huge_page);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

extern struct page_ext_operations debug_guardpage_ops;
extern struct page_ext_operations page_poisoning_ops;

#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
extern bool _debug_guardpage_enabled;

static inline unsigned int debug_guardpage_minorder(void)
{
	return _debug_guardpage_minorder;
}

static inline bool debug_guardpage_enabled(void)
{
	return _debug_guardpage_enabled;
}

static inline bool page_is_guard(struct page *page)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return false;

	page_ext = lookup_page_ext(page);
	return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool debug_guardpage_enabled(void) { return false; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif /* CONFIG_DEBUG_PAGEALLOC */

#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else
static inline void setup_nr_node_ids(void) {}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */