1#ifndef _LINUX_MM_H
2#define _LINUX_MM_H
3
4#include <linux/errno.h>
5
6#ifdef __KERNEL__
7
8#include <linux/mmdebug.h>
9#include <linux/gfp.h>
10#include <linux/bug.h>
11#include <linux/list.h>
12#include <linux/mmzone.h>
13#include <linux/rbtree.h>
14#include <linux/atomic.h>
15#include <linux/debug_locks.h>
16#include <linux/mm_types.h>
17#include <linux/range.h>
18#include <linux/pfn.h>
19#include <linux/bit_spinlock.h>
20#include <linux/shrinker.h>
21#include <linux/resource.h>
22#include <linux/page_ext.h>
23
24struct mempolicy;
25struct anon_vma;
26struct anon_vma_chain;
27struct file_ra_state;
28struct user_struct;
29struct writeback_control;
30
31#ifndef CONFIG_NEED_MULTIPLE_NODES
32extern unsigned long max_mapnr;
33
34static inline void set_max_mapnr(unsigned long limit)
35{
36 max_mapnr = limit;
37}
38#else
39static inline void set_max_mapnr(unsigned long limit) { }
40#endif
41
42extern unsigned long totalram_pages;
43extern void * high_memory;
44extern int page_cluster;
45
46#ifdef CONFIG_SYSCTL
47extern int sysctl_legacy_va_layout;
48#else
49#define sysctl_legacy_va_layout 0
50#endif
51
52#include <asm/page.h>
53#include <asm/pgtable.h>
54#include <asm/processor.h>
55
56#ifndef __pa_symbol
57#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
58#endif
59
60
61
62
63
64
65
66
67#ifndef mm_forbids_zeropage
68#define mm_forbids_zeropage(X) (0)
69#endif
70
71extern unsigned long sysctl_user_reserve_kbytes;
72extern unsigned long sysctl_admin_reserve_kbytes;
73
74extern int sysctl_overcommit_memory;
75extern int sysctl_overcommit_ratio;
76extern unsigned long sysctl_overcommit_kbytes;
77
78extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
79 size_t *, loff_t *);
80extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
81 size_t *, loff_t *);
82
83#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
84
85
86#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
87
88
89#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)addr, PAGE_SIZE)
90
91
92
93
94
95
96
97
98
99
100extern struct kmem_cache *vm_area_cachep;
101
102#ifndef CONFIG_MMU
103extern struct rb_root nommu_region_tree;
104extern struct rw_semaphore nommu_region_sem;
105
106extern unsigned int kobjsize(const void *objp);
107#endif
108
109
110
111
112#define VM_NONE 0x00000000
113
114#define VM_READ 0x00000001
115#define VM_WRITE 0x00000002
116#define VM_EXEC 0x00000004
117#define VM_SHARED 0x00000008
118
119
120#define VM_MAYREAD 0x00000010
121#define VM_MAYWRITE 0x00000020
122#define VM_MAYEXEC 0x00000040
123#define VM_MAYSHARE 0x00000080
124
125#define VM_GROWSDOWN 0x00000100
126#define VM_PFNMAP 0x00000400
127#define VM_DENYWRITE 0x00000800
128
129#define VM_LOCKED 0x00002000
130#define VM_IO 0x00004000
131
132
133#define VM_SEQ_READ 0x00008000
134#define VM_RAND_READ 0x00010000
135
136#define VM_DONTCOPY 0x00020000
137#define VM_DONTEXPAND 0x00040000
138#define VM_ACCOUNT 0x00100000
139#define VM_NORESERVE 0x00200000
140#define VM_HUGETLB 0x00400000
141#define VM_NONLINEAR 0x00800000
142#define VM_ARCH_1 0x01000000
143#define VM_ARCH_2 0x02000000
144#define VM_DONTDUMP 0x04000000
145
146#ifdef CONFIG_MEM_SOFT_DIRTY
147# define VM_SOFTDIRTY 0x08000000
148#else
149# define VM_SOFTDIRTY 0
150#endif
151
152#define VM_MIXEDMAP 0x10000000
153#define VM_HUGEPAGE 0x20000000
154#define VM_NOHUGEPAGE 0x40000000
155#define VM_MERGEABLE 0x80000000
156
157#if defined(CONFIG_X86)
158# define VM_PAT VM_ARCH_1
159#elif defined(CONFIG_PPC)
160# define VM_SAO VM_ARCH_1
161#elif defined(CONFIG_PARISC)
162# define VM_GROWSUP VM_ARCH_1
163#elif defined(CONFIG_METAG)
164# define VM_GROWSUP VM_ARCH_1
165#elif defined(CONFIG_IA64)
166# define VM_GROWSUP VM_ARCH_1
167#elif !defined(CONFIG_MMU)
168# define VM_MAPPED_COPY VM_ARCH_1
169#endif
170
171#if defined(CONFIG_X86)
172
173# define VM_MPX VM_ARCH_2
174#endif
175
176#ifndef VM_GROWSUP
177# define VM_GROWSUP VM_NONE
178#endif
179
180
181#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ)
182
183#ifndef VM_STACK_DEFAULT_FLAGS
184#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
185#endif
186
187#ifdef CONFIG_STACK_GROWSUP
188#define VM_STACK_FLAGS (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
189#else
190#define VM_STACK_FLAGS (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
191#endif
192
193
194
195
196
197#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
198
199
200#define VM_INIT_DEF_MASK VM_NOHUGEPAGE
201
202
203
204
205
206extern pgprot_t protection_map[16];
207
208#define FAULT_FLAG_WRITE 0x01
209#define FAULT_FLAG_NONLINEAR 0x02
210#define FAULT_FLAG_MKWRITE 0x04
211#define FAULT_FLAG_ALLOW_RETRY 0x08
212#define FAULT_FLAG_RETRY_NOWAIT 0x10
213#define FAULT_FLAG_KILLABLE 0x20
214#define FAULT_FLAG_TRIED 0x40
215#define FAULT_FLAG_USER 0x80
216
217
218
219
220
221
222
223
224
225struct vm_fault {
226 unsigned int flags;
227 pgoff_t pgoff;
228 void __user *virtual_address;
229
230 struct page *page;
231
232
233
234
235
236 pgoff_t max_pgoff;
237
238 pte_t *pte;
239};
240
241
242
243
244
245
246struct vm_operations_struct {
247 void (*open)(struct vm_area_struct * area);
248 void (*close)(struct vm_area_struct * area);
249 int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
250 void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);
251
252
253
254 int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
255
256
257
258
259 int (*access)(struct vm_area_struct *vma, unsigned long addr,
260 void *buf, int len, int write);
261
262
263
264
265 const char *(*name)(struct vm_area_struct *vma);
266
267#ifdef CONFIG_NUMA
268
269
270
271
272
273
274
275 int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
276
277
278
279
280
281
282
283
284
285
286
287 struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
288 unsigned long addr);
289#endif
290
291 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
292 unsigned long size, pgoff_t pgoff);
293};
294
295struct mmu_gather;
296struct inode;
297
298#define page_private(page) ((page)->private)
299#define set_page_private(page, v) ((page)->private = (v))
300
301
302static inline void set_freepage_migratetype(struct page *page, int migratetype)
303{
304 page->index = migratetype;
305}
306
307
308static inline int get_freepage_migratetype(struct page *page)
309{
310 return page->index;
311}
312
313
314
315
316
317#include <linux/page-flags.h>
318#include <linux/huge_mm.h>
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336static inline int put_page_testzero(struct page *page)
337{
338 VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page);
339 return atomic_dec_and_test(&page->_count);
340}
341
342
343
344
345
346
347
348static inline int get_page_unless_zero(struct page *page)
349{
350 return atomic_inc_not_zero(&page->_count);
351}
352
353
354
355
356
357
358
359
360static inline int put_page_unless_one(struct page *page)
361{
362 return atomic_add_unless(&page->_count, -1, 1);
363}
364
365extern int page_is_ram(unsigned long pfn);
366extern int region_is_ram(resource_size_t phys_addr, unsigned long size);
367
368
369struct page *vmalloc_to_page(const void *addr);
370unsigned long vmalloc_to_pfn(const void *addr);
371
372
373
374
375
376
377
378static inline int is_vmalloc_addr(const void *x)
379{
380#ifdef CONFIG_MMU
381 unsigned long addr = (unsigned long)x;
382
383 return addr >= VMALLOC_START && addr < VMALLOC_END;
384#else
385 return 0;
386#endif
387}
388#ifdef CONFIG_MMU
389extern int is_vmalloc_or_module_addr(const void *x);
390#else
391static inline int is_vmalloc_or_module_addr(const void *x)
392{
393 return 0;
394}
395#endif
396
397extern void kvfree(const void *addr);
398
399static inline void compound_lock(struct page *page)
400{
401#ifdef CONFIG_TRANSPARENT_HUGEPAGE
402 VM_BUG_ON_PAGE(PageSlab(page), page);
403 bit_spin_lock(PG_compound_lock, &page->flags);
404#endif
405}
406
407static inline void compound_unlock(struct page *page)
408{
409#ifdef CONFIG_TRANSPARENT_HUGEPAGE
410 VM_BUG_ON_PAGE(PageSlab(page), page);
411 bit_spin_unlock(PG_compound_lock, &page->flags);
412#endif
413}
414
415static inline unsigned long compound_lock_irqsave(struct page *page)
416{
417 unsigned long uninitialized_var(flags);
418#ifdef CONFIG_TRANSPARENT_HUGEPAGE
419 local_irq_save(flags);
420 compound_lock(page);
421#endif
422 return flags;
423}
424
425static inline void compound_unlock_irqrestore(struct page *page,
426 unsigned long flags)
427{
428#ifdef CONFIG_TRANSPARENT_HUGEPAGE
429 compound_unlock(page);
430 local_irq_restore(flags);
431#endif
432}
433
434static inline struct page *compound_head_by_tail(struct page *tail)
435{
436 struct page *head = tail->first_page;
437
438
439
440
441
442
443 smp_rmb();
444 if (likely(PageTail(tail)))
445 return head;
446 return tail;
447}
448
449static inline struct page *compound_head(struct page *page)
450{
451 if (unlikely(PageTail(page)))
452 return compound_head_by_tail(page);
453 return page;
454}
455
456
457
458
459
460
461static inline void page_mapcount_reset(struct page *page)
462{
463 atomic_set(&(page)->_mapcount, -1);
464}
465
466static inline int page_mapcount(struct page *page)
467{
468 return atomic_read(&(page)->_mapcount) + 1;
469}
470
471static inline int page_count(struct page *page)
472{
473 return atomic_read(&compound_head(page)->_count);
474}
475
476#ifdef CONFIG_HUGETLB_PAGE
477extern int PageHeadHuge(struct page *page_head);
478#else
479static inline int PageHeadHuge(struct page *page_head)
480{
481 return 0;
482}
483#endif
484
485static inline bool __compound_tail_refcounted(struct page *page)
486{
487 return !PageSlab(page) && !PageHeadHuge(page);
488}
489
490
491
492
493
494
495
496
497
498static inline bool compound_tail_refcounted(struct page *page)
499{
500 VM_BUG_ON_PAGE(!PageHead(page), page);
501 return __compound_tail_refcounted(page);
502}
503
504static inline void get_huge_page_tail(struct page *page)
505{
506
507
508
509 VM_BUG_ON_PAGE(!PageTail(page), page);
510 VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
511 VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
512 if (compound_tail_refcounted(page->first_page))
513 atomic_inc(&page->_mapcount);
514}
515
516extern bool __get_page_tail(struct page *page);
517
518static inline void get_page(struct page *page)
519{
520 if (unlikely(PageTail(page)))
521 if (likely(__get_page_tail(page)))
522 return;
523
524
525
526
527 VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
528 atomic_inc(&page->_count);
529}
530
531static inline struct page *virt_to_head_page(const void *x)
532{
533 struct page *page = virt_to_page(x);
534 return compound_head(page);
535}
536
537
538
539
540
541static inline void init_page_count(struct page *page)
542{
543 atomic_set(&page->_count, 1);
544}
545
546
547
548
549
550
551
552
553
554
555#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
556
557static inline int PageBuddy(struct page *page)
558{
559 return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
560}
561
562static inline void __SetPageBuddy(struct page *page)
563{
564 VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
565 atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
566}
567
568static inline void __ClearPageBuddy(struct page *page)
569{
570 VM_BUG_ON_PAGE(!PageBuddy(page), page);
571 atomic_set(&page->_mapcount, -1);
572}
573
574#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
575
576static inline int PageBalloon(struct page *page)
577{
578 return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
579}
580
581static inline void __SetPageBalloon(struct page *page)
582{
583 VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
584 atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
585}
586
587static inline void __ClearPageBalloon(struct page *page)
588{
589 VM_BUG_ON_PAGE(!PageBalloon(page), page);
590 atomic_set(&page->_mapcount, -1);
591}
592
593void put_page(struct page *page);
594void put_pages_list(struct list_head *pages);
595
596void split_page(struct page *page, unsigned int order);
597int split_free_page(struct page *page);
598
599
600
601
602
603
604typedef void compound_page_dtor(struct page *);
605
606static inline void set_compound_page_dtor(struct page *page,
607 compound_page_dtor *dtor)
608{
609 page[1].lru.next = (void *)dtor;
610}
611
612static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
613{
614 return (compound_page_dtor *)page[1].lru.next;
615}
616
617static inline int compound_order(struct page *page)
618{
619 if (!PageHead(page))
620 return 0;
621 return (unsigned long)page[1].lru.prev;
622}
623
624static inline void set_compound_order(struct page *page, unsigned long order)
625{
626 page[1].lru.prev = (void *)order;
627}
628
629#ifdef CONFIG_MMU
630
631
632
633
634
635
636static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
637{
638 if (likely(vma->vm_flags & VM_WRITE))
639 pte = pte_mkwrite(pte);
640 return pte;
641}
642
643void do_set_pte(struct vm_area_struct *vma, unsigned long address,
644 struct page *page, pte_t *pte, bool write, bool anon);
645#endif
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
714#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
715#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
716#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
717
718
719
720
721
722
723#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
724#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
725#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
726#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
727
728
729#ifdef NODE_NOT_IN_PAGE_FLAGS
730#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
731#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF)? \
732 SECTIONS_PGOFF : ZONES_PGOFF)
733#else
734#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT)
735#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF)? \
736 NODES_PGOFF : ZONES_PGOFF)
737#endif
738
739#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
740
741#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
742#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
743#endif
744
745#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
746#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
747#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
748#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1)
749#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
750
751static inline enum zone_type page_zonenum(const struct page *page)
752{
753 return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
754}
755
756#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
757#define SECTION_IN_PAGE_FLAGS
758#endif
759
760
761
762
763
764
765
766
767
768static inline int page_zone_id(struct page *page)
769{
770 return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
771}
772
773static inline int zone_to_nid(struct zone *zone)
774{
775#ifdef CONFIG_NUMA
776 return zone->node;
777#else
778 return 0;
779#endif
780}
781
782#ifdef NODE_NOT_IN_PAGE_FLAGS
783extern int page_to_nid(const struct page *page);
784#else
785static inline int page_to_nid(const struct page *page)
786{
787 return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
788}
789#endif
790
791#ifdef CONFIG_NUMA_BALANCING
792static inline int cpu_pid_to_cpupid(int cpu, int pid)
793{
794 return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
795}
796
797static inline int cpupid_to_pid(int cpupid)
798{
799 return cpupid & LAST__PID_MASK;
800}
801
802static inline int cpupid_to_cpu(int cpupid)
803{
804 return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
805}
806
807static inline int cpupid_to_nid(int cpupid)
808{
809 return cpu_to_node(cpupid_to_cpu(cpupid));
810}
811
812static inline bool cpupid_pid_unset(int cpupid)
813{
814 return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
815}
816
817static inline bool cpupid_cpu_unset(int cpupid)
818{
819 return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
820}
821
822static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
823{
824 return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
825}
826
827#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
828#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
829static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
830{
831 return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
832}
833
834static inline int page_cpupid_last(struct page *page)
835{
836 return page->_last_cpupid;
837}
838static inline void page_cpupid_reset_last(struct page *page)
839{
840 page->_last_cpupid = -1 & LAST_CPUPID_MASK;
841}
842#else
843static inline int page_cpupid_last(struct page *page)
844{
845 return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
846}
847
848extern int page_cpupid_xchg_last(struct page *page, int cpupid);
849
850static inline void page_cpupid_reset_last(struct page *page)
851{
852 int cpupid = (1 << LAST_CPUPID_SHIFT) - 1;
853
854 page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
855 page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
856}
857#endif
858#else
859static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
860{
861 return page_to_nid(page);
862}
863
864static inline int page_cpupid_last(struct page *page)
865{
866 return page_to_nid(page);
867}
868
869static inline int cpupid_to_nid(int cpupid)
870{
871 return -1;
872}
873
874static inline int cpupid_to_pid(int cpupid)
875{
876 return -1;
877}
878
879static inline int cpupid_to_cpu(int cpupid)
880{
881 return -1;
882}
883
884static inline int cpu_pid_to_cpupid(int nid, int pid)
885{
886 return -1;
887}
888
889static inline bool cpupid_pid_unset(int cpupid)
890{
891 return 1;
892}
893
894static inline void page_cpupid_reset_last(struct page *page)
895{
896}
897
898static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
899{
900 return false;
901}
902#endif
903
904static inline struct zone *page_zone(const struct page *page)
905{
906 return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
907}
908
909#ifdef SECTION_IN_PAGE_FLAGS
910static inline void set_page_section(struct page *page, unsigned long section)
911{
912 page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
913 page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
914}
915
916static inline unsigned long page_to_section(const struct page *page)
917{
918 return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
919}
920#endif
921
922static inline void set_page_zone(struct page *page, enum zone_type zone)
923{
924 page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
925 page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
926}
927
928static inline void set_page_node(struct page *page, unsigned long node)
929{
930 page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
931 page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
932}
933
934static inline void set_page_links(struct page *page, enum zone_type zone,
935 unsigned long node, unsigned long pfn)
936{
937 set_page_zone(page, zone);
938 set_page_node(page, node);
939#ifdef SECTION_IN_PAGE_FLAGS
940 set_page_section(page, pfn_to_section_nr(pfn));
941#endif
942}
943
944
945
946
947#include <linux/vmstat.h>
948
949static __always_inline void *lowmem_page_address(const struct page *page)
950{
951 return __va(PFN_PHYS(page_to_pfn(page)));
952}
953
954#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
955#define HASHED_PAGE_VIRTUAL
956#endif
957
958#if defined(WANT_PAGE_VIRTUAL)
959static inline void *page_address(const struct page *page)
960{
961 return page->virtual;
962}
963static inline void set_page_address(struct page *page, void *address)
964{
965 page->virtual = address;
966}
967#define page_address_init() do { } while(0)
968#endif
969
970#if defined(HASHED_PAGE_VIRTUAL)
971void *page_address(const struct page *page);
972void set_page_address(struct page *page, void *virtual);
973void page_address_init(void);
974#endif
975
976#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
977#define page_address(page) lowmem_page_address(page)
978#define set_page_address(page, address) do { } while(0)
979#define page_address_init() do { } while(0)
980#endif
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998#define PAGE_MAPPING_ANON 1
999#define PAGE_MAPPING_KSM 2
1000#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
1001
1002extern struct address_space *page_mapping(struct page *page);
1003
1004
1005static inline void *page_rmapping(struct page *page)
1006{
1007 return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
1008}
1009
1010extern struct address_space *__page_file_mapping(struct page *);
1011
1012static inline
1013struct address_space *page_file_mapping(struct page *page)
1014{
1015 if (unlikely(PageSwapCache(page)))
1016 return __page_file_mapping(page);
1017
1018 return page->mapping;
1019}
1020
1021static inline int PageAnon(struct page *page)
1022{
1023 return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
1024}
1025
1026
1027
1028
1029
1030static inline pgoff_t page_index(struct page *page)
1031{
1032 if (unlikely(PageSwapCache(page)))
1033 return page_private(page);
1034 return page->index;
1035}
1036
1037extern pgoff_t __page_file_index(struct page *page);
1038
1039
1040
1041
1042
1043static inline pgoff_t page_file_index(struct page *page)
1044{
1045 if (unlikely(PageSwapCache(page)))
1046 return __page_file_index(page);
1047
1048 return page->index;
1049}
1050
1051
1052
1053
1054static inline int page_mapped(struct page *page)
1055{
1056 return atomic_read(&(page)->_mapcount) >= 0;
1057}
1058
1059
1060
1061
1062
1063
1064
1065#define VM_FAULT_MINOR 0
1066
1067#define VM_FAULT_OOM 0x0001
1068#define VM_FAULT_SIGBUS 0x0002
1069#define VM_FAULT_MAJOR 0x0004
1070#define VM_FAULT_WRITE 0x0008
1071#define VM_FAULT_HWPOISON 0x0010
1072#define VM_FAULT_HWPOISON_LARGE 0x0020
1073#define VM_FAULT_SIGSEGV 0x0040
1074
1075#define VM_FAULT_NOPAGE 0x0100
1076#define VM_FAULT_LOCKED 0x0200
1077#define VM_FAULT_RETRY 0x0400
1078#define VM_FAULT_FALLBACK 0x0800
1079
1080#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000
1081
1082#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
1083 VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
1084 VM_FAULT_FALLBACK)
1085
1086
1087#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
1088#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
1089
1090
1091
1092
1093extern void pagefault_out_of_memory(void);
1094
1095#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
1096
1097
1098
1099
1100
1101#define SHOW_MEM_FILTER_NODES (0x0001u)
1102
1103extern void show_free_areas(unsigned int flags);
1104extern bool skip_free_areas_node(unsigned int flags, int nid);
1105
1106int shmem_zero_setup(struct vm_area_struct *);
1107#ifdef CONFIG_SHMEM
1108bool shmem_mapping(struct address_space *mapping);
1109#else
1110static inline bool shmem_mapping(struct address_space *mapping)
1111{
1112 return false;
1113}
1114#endif
1115
1116extern int can_do_mlock(void);
1117extern int user_shm_lock(size_t, struct user_struct *);
1118extern void user_shm_unlock(size_t, struct user_struct *);
1119
1120
1121
1122
1123struct zap_details {
1124 struct vm_area_struct *nonlinear_vma;
1125 struct address_space *check_mapping;
1126 pgoff_t first_index;
1127 pgoff_t last_index;
1128};
1129
1130struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
1131 pte_t pte);
1132
1133int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1134 unsigned long size);
1135void zap_page_range(struct vm_area_struct *vma, unsigned long address,
1136 unsigned long size, struct zap_details *);
1137void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
1138 unsigned long start, unsigned long end);
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156struct mm_walk {
1157 int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
1158 unsigned long next, struct mm_walk *walk);
1159 int (*pud_entry)(pud_t *pud, unsigned long addr,
1160 unsigned long next, struct mm_walk *walk);
1161 int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
1162 unsigned long next, struct mm_walk *walk);
1163 int (*pte_entry)(pte_t *pte, unsigned long addr,
1164 unsigned long next, struct mm_walk *walk);
1165 int (*pte_hole)(unsigned long addr, unsigned long next,
1166 struct mm_walk *walk);
1167 int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
1168 unsigned long addr, unsigned long next,
1169 struct mm_walk *walk);
1170 struct mm_struct *mm;
1171 void *private;
1172};
1173
1174int walk_page_range(unsigned long addr, unsigned long end,
1175 struct mm_walk *walk);
1176void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
1177 unsigned long end, unsigned long floor, unsigned long ceiling);
1178int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
1179 struct vm_area_struct *vma);
1180void unmap_mapping_range(struct address_space *mapping,
1181 loff_t const holebegin, loff_t const holelen, int even_cows);
1182int follow_pfn(struct vm_area_struct *vma, unsigned long address,
1183 unsigned long *pfn);
1184int follow_phys(struct vm_area_struct *vma, unsigned long address,
1185 unsigned int flags, unsigned long *prot, resource_size_t *phys);
1186int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
1187 void *buf, int len, int write);
1188
1189static inline void unmap_shared_mapping_range(struct address_space *mapping,
1190 loff_t const holebegin, loff_t const holelen)
1191{
1192 unmap_mapping_range(mapping, holebegin, holelen, 0);
1193}
1194
1195extern void truncate_pagecache(struct inode *inode, loff_t new);
1196extern void truncate_setsize(struct inode *inode, loff_t newsize);
1197void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
1198void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
1199int truncate_inode_page(struct address_space *mapping, struct page *page);
1200int generic_error_remove_page(struct address_space *mapping, struct page *page);
1201int invalidate_inode_page(struct page *page);
1202
1203#ifdef CONFIG_MMU
1204extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
1205 unsigned long address, unsigned int flags);
1206extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
1207 unsigned long address, unsigned int fault_flags);
1208#else
1209static inline int handle_mm_fault(struct mm_struct *mm,
1210 struct vm_area_struct *vma, unsigned long address,
1211 unsigned int flags)
1212{
1213
1214 BUG();
1215 return VM_FAULT_SIGBUS;
1216}
1217static inline int fixup_user_fault(struct task_struct *tsk,
1218 struct mm_struct *mm, unsigned long address,
1219 unsigned int fault_flags)
1220{
1221
1222 BUG();
1223 return -EFAULT;
1224}
1225#endif
1226
1227extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
1228extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1229 void *buf, int len, int write);
1230
1231long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1232 unsigned long start, unsigned long nr_pages,
1233 unsigned int foll_flags, struct page **pages,
1234 struct vm_area_struct **vmas, int *nonblocking);
1235long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1236 unsigned long start, unsigned long nr_pages,
1237 int write, int force, struct page **pages,
1238 struct vm_area_struct **vmas);
1239int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1240 struct page **pages);
1241struct kvec;
1242int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
1243 struct page **pages);
1244int get_kernel_page(unsigned long start, int write, struct page **pages);
1245struct page *get_dump_page(unsigned long addr);
1246
1247extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
1248extern void do_invalidatepage(struct page *page, unsigned int offset,
1249 unsigned int length);
1250
1251int __set_page_dirty_nobuffers(struct page *page);
1252int __set_page_dirty_no_writeback(struct page *page);
1253int redirty_page_for_writepage(struct writeback_control *wbc,
1254 struct page *page);
1255void account_page_dirtied(struct page *page, struct address_space *mapping);
1256int set_page_dirty(struct page *page);
1257int set_page_dirty_lock(struct page *page);
1258int clear_page_dirty_for_io(struct page *page);
1259int get_cmdline(struct task_struct *task, char *buffer, int buflen);
1260
1261
1262static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
1263{
1264 return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
1265}
1266
1267static inline int stack_guard_page_start(struct vm_area_struct *vma,
1268 unsigned long addr)
1269{
1270 return (vma->vm_flags & VM_GROWSDOWN) &&
1271 (vma->vm_start == addr) &&
1272 !vma_growsdown(vma->vm_prev, addr);
1273}
1274
1275
1276static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
1277{
1278 return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
1279}
1280
1281static inline int stack_guard_page_end(struct vm_area_struct *vma,
1282 unsigned long addr)
1283{
1284 return (vma->vm_flags & VM_GROWSUP) &&
1285 (vma->vm_end == addr) &&
1286 !vma_growsup(vma->vm_next, addr);
1287}
1288
1289extern struct task_struct *task_of_stack(struct task_struct *task,
1290 struct vm_area_struct *vma, bool in_group);
1291
1292extern unsigned long move_page_tables(struct vm_area_struct *vma,
1293 unsigned long old_addr, struct vm_area_struct *new_vma,
1294 unsigned long new_addr, unsigned long len,
1295 bool need_rmap_locks);
1296extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
1297 unsigned long end, pgprot_t newprot,
1298 int dirty_accountable, int prot_numa);
1299extern int mprotect_fixup(struct vm_area_struct *vma,
1300 struct vm_area_struct **pprev, unsigned long start,
1301 unsigned long end, unsigned long newflags);
1302
1303
1304
1305
1306int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
1307 struct page **pages);
1308
1309
1310
1311static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
1312{
1313 long val = atomic_long_read(&mm->rss_stat.count[member]);
1314
1315#ifdef SPLIT_RSS_COUNTING
1316
1317
1318
1319
1320 if (val < 0)
1321 val = 0;
1322#endif
1323 return (unsigned long)val;
1324}
1325
1326static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
1327{
1328 atomic_long_add(value, &mm->rss_stat.count[member]);
1329}
1330
1331static inline void inc_mm_counter(struct mm_struct *mm, int member)
1332{
1333 atomic_long_inc(&mm->rss_stat.count[member]);
1334}
1335
1336static inline void dec_mm_counter(struct mm_struct *mm, int member)
1337{
1338 atomic_long_dec(&mm->rss_stat.count[member]);
1339}
1340
1341static inline unsigned long get_mm_rss(struct mm_struct *mm)
1342{
1343 return get_mm_counter(mm, MM_FILEPAGES) +
1344 get_mm_counter(mm, MM_ANONPAGES);
1345}
1346
1347static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
1348{
1349 return max(mm->hiwater_rss, get_mm_rss(mm));
1350}
1351
1352static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
1353{
1354 return max(mm->hiwater_vm, mm->total_vm);
1355}
1356
1357static inline void update_hiwater_rss(struct mm_struct *mm)
1358{
1359 unsigned long _rss = get_mm_rss(mm);
1360
1361 if ((mm)->hiwater_rss < _rss)
1362 (mm)->hiwater_rss = _rss;
1363}
1364
1365static inline void update_hiwater_vm(struct mm_struct *mm)
1366{
1367 if (mm->hiwater_vm < mm->total_vm)
1368 mm->hiwater_vm = mm->total_vm;
1369}
1370
1371static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
1372 struct mm_struct *mm)
1373{
1374 unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
1375
1376 if (*maxrss < hiwater_rss)
1377 *maxrss = hiwater_rss;
1378}
1379
1380#if defined(SPLIT_RSS_COUNTING)
1381void sync_mm_rss(struct mm_struct *mm);
1382#else
1383static inline void sync_mm_rss(struct mm_struct *mm)
1384{
1385}
1386#endif
1387
1388int vma_wants_writenotify(struct vm_area_struct *vma);
1389
1390extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1391 spinlock_t **ptl);
1392static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
1393 spinlock_t **ptl)
1394{
1395 pte_t *ptep;
1396 __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
1397 return ptep;
1398}
1399
1400#ifdef __PAGETABLE_PUD_FOLDED
1401static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
1402 unsigned long address)
1403{
1404 return 0;
1405}
1406#else
1407int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
1408#endif
1409
1410#ifdef __PAGETABLE_PMD_FOLDED
1411static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
1412 unsigned long address)
1413{
1414 return 0;
1415}
1416#else
1417int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
1418#endif
1419
1420int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
1421 pmd_t *pmd, unsigned long address);
1422int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
1423
1424
1425
1426
1427
1428#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
1429static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
1430{
1431 return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
1432 NULL: pud_offset(pgd, address);
1433}
1434
1435static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
1436{
1437 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
1438 NULL: pmd_offset(pud, address);
1439}
1440#endif
1441
1442#if USE_SPLIT_PTE_PTLOCKS
1443#if ALLOC_SPLIT_PTLOCKS
1444void __init ptlock_cache_init(void);
1445extern bool ptlock_alloc(struct page *page);
1446extern void ptlock_free(struct page *page);
1447
1448static inline spinlock_t *ptlock_ptr(struct page *page)
1449{
1450 return page->ptl;
1451}
1452#else
1453static inline void ptlock_cache_init(void)
1454{
1455}
1456
1457static inline bool ptlock_alloc(struct page *page)
1458{
1459 return true;
1460}
1461
1462static inline void ptlock_free(struct page *page)
1463{
1464}
1465
1466static inline spinlock_t *ptlock_ptr(struct page *page)
1467{
1468 return &page->ptl;
1469}
1470#endif
1471
1472static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1473{
1474 return ptlock_ptr(pmd_page(*pmd));
1475}
1476
1477static inline bool ptlock_init(struct page *page)
1478{
1479
1480
1481
1482
1483
1484
1485
1486
1487 VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
1488 if (!ptlock_alloc(page))
1489 return false;
1490 spin_lock_init(ptlock_ptr(page));
1491 return true;
1492}
1493
1494
1495static inline void pte_lock_deinit(struct page *page)
1496{
1497 page->mapping = NULL;
1498 ptlock_free(page);
1499}
1500
1501#else
1502
1503
1504
1505static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1506{
1507 return &mm->page_table_lock;
1508}
1509static inline void ptlock_cache_init(void) {}
1510static inline bool ptlock_init(struct page *page) { return true; }
1511static inline void pte_lock_deinit(struct page *page) {}
1512#endif
1513
1514static inline void pgtable_init(void)
1515{
1516 ptlock_cache_init();
1517 pgtable_cache_init();
1518}
1519
1520static inline bool pgtable_page_ctor(struct page *page)
1521{
1522 inc_zone_page_state(page, NR_PAGETABLE);
1523 return ptlock_init(page);
1524}
1525
1526static inline void pgtable_page_dtor(struct page *page)
1527{
1528 pte_lock_deinit(page);
1529 dec_zone_page_state(page, NR_PAGETABLE);
1530}
1531
1532#define pte_offset_map_lock(mm, pmd, address, ptlp) \
1533({ \
1534 spinlock_t *__ptl = pte_lockptr(mm, pmd); \
1535 pte_t *__pte = pte_offset_map(pmd, address); \
1536 *(ptlp) = __ptl; \
1537 spin_lock(__ptl); \
1538 __pte; \
1539})
1540
1541#define pte_unmap_unlock(pte, ptl) do { \
1542 spin_unlock(ptl); \
1543 pte_unmap(pte); \
1544} while (0)
1545
1546#define pte_alloc_map(mm, vma, pmd, address) \
1547 ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma, \
1548 pmd, address))? \
1549 NULL: pte_offset_map(pmd, address))
1550
1551#define pte_alloc_map_lock(mm, pmd, address, ptlp) \
1552 ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL, \
1553 pmd, address))? \
1554 NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
1555
1556#define pte_alloc_kernel(pmd, address) \
1557 ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
1558 NULL: pte_offset_kernel(pmd, address))
1559
1560#if USE_SPLIT_PMD_PTLOCKS
1561
1562static struct page *pmd_to_page(pmd_t *pmd)
1563{
1564 unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
1565 return virt_to_page((void *)((unsigned long) pmd & mask));
1566}
1567
1568static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1569{
1570 return ptlock_ptr(pmd_to_page(pmd));
1571}
1572
1573static inline bool pgtable_pmd_page_ctor(struct page *page)
1574{
1575#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1576 page->pmd_huge_pte = NULL;
1577#endif
1578 return ptlock_init(page);
1579}
1580
1581static inline void pgtable_pmd_page_dtor(struct page *page)
1582{
1583#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1584 VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
1585#endif
1586 ptlock_free(page);
1587}
1588
1589#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)
1590
1591#else
1592
1593static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1594{
1595 return &mm->page_table_lock;
1596}
1597
1598static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
1599static inline void pgtable_pmd_page_dtor(struct page *page) {}
1600
1601#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
1602
1603#endif
1604
1605static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
1606{
1607 spinlock_t *ptl = pmd_lockptr(mm, pmd);
1608 spin_lock(ptl);
1609 return ptl;
1610}
1611
1612extern void free_area_init(unsigned long * zones_size);
1613extern void free_area_init_node(int nid, unsigned long * zones_size,
1614 unsigned long zone_start_pfn, unsigned long *zholes_size);
1615extern void free_initmem(void);
1616
1617
1618
1619
1620
1621
1622
1623extern unsigned long free_reserved_area(void *start, void *end,
1624 int poison, char *s);
1625
1626#ifdef CONFIG_HIGHMEM
1627
1628
1629
1630
1631extern void free_highmem_page(struct page *page);
1632#endif
1633
1634extern void adjust_managed_page_count(struct page *page, long count);
1635extern void mem_init_print_info(const char *str);
1636
1637
1638static inline void __free_reserved_page(struct page *page)
1639{
1640 ClearPageReserved(page);
1641 init_page_count(page);
1642 __free_page(page);
1643}
1644
1645static inline void free_reserved_page(struct page *page)
1646{
1647 __free_reserved_page(page);
1648 adjust_managed_page_count(page, 1);
1649}
1650
1651static inline void mark_page_reserved(struct page *page)
1652{
1653 SetPageReserved(page);
1654 adjust_managed_page_count(page, -1);
1655}
1656
1657
1658
1659
1660
1661
1662
1663static inline unsigned long free_initmem_default(int poison)
1664{
1665 extern char __init_begin[], __init_end[];
1666
1667 return free_reserved_area(&__init_begin, &__init_end,
1668 poison, "unused kernel");
1669}
1670
1671static inline unsigned long get_num_physpages(void)
1672{
1673 int nid;
1674 unsigned long phys_pages = 0;
1675
1676 for_each_online_node(nid)
1677 phys_pages += node_present_pages(nid);
1678
1679 return phys_pages;
1680}
1681
1682#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709extern void free_area_init_nodes(unsigned long *max_zone_pfn);
1710unsigned long node_map_pfn_alignment(void);
1711unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
1712 unsigned long end_pfn);
1713extern unsigned long absent_pages_in_range(unsigned long start_pfn,
1714 unsigned long end_pfn);
1715extern void get_pfn_range_for_nid(unsigned int nid,
1716 unsigned long *start_pfn, unsigned long *end_pfn);
1717extern unsigned long find_min_pfn_with_active_regions(void);
1718extern void free_bootmem_with_active_regions(int nid,
1719 unsigned long max_low_pfn);
1720extern void sparse_memory_present_with_active_regions(int nid);
1721
1722#endif
1723
1724#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
1725 !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
1726static inline int __early_pfn_to_nid(unsigned long pfn)
1727{
1728 return 0;
1729}
1730#else
1731
1732extern int __meminit early_pfn_to_nid(unsigned long pfn);
1733
1734extern int __meminit __early_pfn_to_nid(unsigned long pfn);
1735#endif
1736
1737extern void set_dma_reserve(unsigned long new_dma_reserve);
1738extern void memmap_init_zone(unsigned long, int, unsigned long,
1739 unsigned long, enum memmap_context);
1740extern void setup_per_zone_wmarks(void);
1741extern int __meminit init_per_zone_wmark_min(void);
1742extern void mem_init(void);
1743extern void __init mmap_init(void);
1744extern void show_mem(unsigned int flags);
1745extern void si_meminfo(struct sysinfo * val);
1746extern void si_meminfo_node(struct sysinfo *val, int nid);
1747
1748extern __printf(3, 4)
1749void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
1750
1751extern void setup_per_cpu_pageset(void);
1752
1753extern void zone_pcp_update(struct zone *zone);
1754extern void zone_pcp_reset(struct zone *zone);
1755
1756
1757extern int min_free_kbytes;
1758
1759
1760extern atomic_long_t mmap_pages_allocated;
1761extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
1762
1763
1764void vma_interval_tree_insert(struct vm_area_struct *node,
1765 struct rb_root *root);
1766void vma_interval_tree_insert_after(struct vm_area_struct *node,
1767 struct vm_area_struct *prev,
1768 struct rb_root *root);
1769void vma_interval_tree_remove(struct vm_area_struct *node,
1770 struct rb_root *root);
1771struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root,
1772 unsigned long start, unsigned long last);
1773struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
1774 unsigned long start, unsigned long last);
1775
1776#define vma_interval_tree_foreach(vma, root, start, last) \
1777 for (vma = vma_interval_tree_iter_first(root, start, last); \
1778 vma; vma = vma_interval_tree_iter_next(vma, start, last))
1779
1780static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
1781 struct list_head *list)
1782{
1783 list_add_tail(&vma->shared.nonlinear, list);
1784}
1785
1786void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
1787 struct rb_root *root);
1788void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
1789 struct rb_root *root);
1790struct anon_vma_chain *anon_vma_interval_tree_iter_first(
1791 struct rb_root *root, unsigned long start, unsigned long last);
1792struct anon_vma_chain *anon_vma_interval_tree_iter_next(
1793 struct anon_vma_chain *node, unsigned long start, unsigned long last);
1794#ifdef CONFIG_DEBUG_VM_RB
1795void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
1796#endif
1797
1798#define anon_vma_interval_tree_foreach(avc, root, start, last) \
1799 for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
1800 avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
1801
1802
1803extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
1804extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
1805 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
1806extern struct vm_area_struct *vma_merge(struct mm_struct *,
1807 struct vm_area_struct *prev, unsigned long addr, unsigned long end,
1808 unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
1809 struct mempolicy *);
1810extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
1811extern int split_vma(struct mm_struct *,
1812 struct vm_area_struct *, unsigned long addr, int new_below);
1813extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
1814extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
1815 struct rb_node **, struct rb_node *);
1816extern void unlink_file_vma(struct vm_area_struct *);
1817extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
1818 unsigned long addr, unsigned long len, pgoff_t pgoff,
1819 bool *need_rmap_locks);
1820extern void exit_mmap(struct mm_struct *);
1821
1822static inline int check_data_rlimit(unsigned long rlim,
1823 unsigned long new,
1824 unsigned long start,
1825 unsigned long end_data,
1826 unsigned long start_data)
1827{
1828 if (rlim < RLIM_INFINITY) {
1829 if (((new - start) + (end_data - start_data)) > rlim)
1830 return -ENOSPC;
1831 }
1832
1833 return 0;
1834}
1835
1836extern int mm_take_all_locks(struct mm_struct *mm);
1837extern void mm_drop_all_locks(struct mm_struct *mm);
1838
1839extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
1840extern struct file *get_mm_exe_file(struct mm_struct *mm);
1841
1842extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
1843extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
1844 unsigned long addr, unsigned long len,
1845 unsigned long flags,
1846 const struct vm_special_mapping *spec);
1847
1848extern int install_special_mapping(struct mm_struct *mm,
1849 unsigned long addr, unsigned long len,
1850 unsigned long flags, struct page **pages);
1851
1852extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
1853
1854extern unsigned long mmap_region(struct file *file, unsigned long addr,
1855 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
1856extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
1857 unsigned long len, unsigned long prot, unsigned long flags,
1858 unsigned long pgoff, unsigned long *populate);
1859extern int do_munmap(struct mm_struct *, unsigned long, size_t);
1860
1861#ifdef CONFIG_MMU
1862extern int __mm_populate(unsigned long addr, unsigned long len,
1863 int ignore_errors);
1864static inline void mm_populate(unsigned long addr, unsigned long len)
1865{
1866
1867 (void) __mm_populate(addr, len, 1);
1868}
1869#else
1870static inline void mm_populate(unsigned long addr, unsigned long len) {}
1871#endif
1872
1873
1874extern unsigned long vm_brk(unsigned long, unsigned long);
1875extern int vm_munmap(unsigned long, size_t);
1876extern unsigned long vm_mmap(struct file *, unsigned long,
1877 unsigned long, unsigned long,
1878 unsigned long, unsigned long);
1879
1880struct vm_unmapped_area_info {
1881#define VM_UNMAPPED_AREA_TOPDOWN 1
1882 unsigned long flags;
1883 unsigned long length;
1884 unsigned long low_limit;
1885 unsigned long high_limit;
1886 unsigned long align_mask;
1887 unsigned long align_offset;
1888};
1889
1890extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
1891extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902static inline unsigned long
1903vm_unmapped_area(struct vm_unmapped_area_info *info)
1904{
1905 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
1906 return unmapped_area(info);
1907 else
1908 return unmapped_area_topdown(info);
1909}
1910
1911
1912extern void truncate_inode_pages(struct address_space *, loff_t);
1913extern void truncate_inode_pages_range(struct address_space *,
1914 loff_t lstart, loff_t lend);
1915extern void truncate_inode_pages_final(struct address_space *);
1916
1917
1918extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
1919extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf);
1920extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
1921
1922
1923int write_one_page(struct page *page, int wait);
1924void task_dirty_inc(struct task_struct *tsk);
1925
1926
1927#define VM_MAX_READAHEAD 128
1928#define VM_MIN_READAHEAD 16
1929
1930int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
1931 pgoff_t offset, unsigned long nr_to_read);
1932
1933void page_cache_sync_readahead(struct address_space *mapping,
1934 struct file_ra_state *ra,
1935 struct file *filp,
1936 pgoff_t offset,
1937 unsigned long size);
1938
1939void page_cache_async_readahead(struct address_space *mapping,
1940 struct file_ra_state *ra,
1941 struct file *filp,
1942 struct page *pg,
1943 pgoff_t offset,
1944 unsigned long size);
1945
1946unsigned long max_sane_readahead(unsigned long nr);
1947
1948
1949extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
1950
1951
1952extern int expand_downwards(struct vm_area_struct *vma,
1953 unsigned long address);
1954#if VM_GROWSUP
1955extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
1956#else
1957 #define expand_upwards(vma, address) (0)
1958#endif
1959
1960
1961extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
1962extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
1963 struct vm_area_struct **pprev);
1964
1965
1966
1967static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
1968{
1969 struct vm_area_struct * vma = find_vma(mm,start_addr);
1970
1971 if (vma && end_addr <= vma->vm_start)
1972 vma = NULL;
1973 return vma;
1974}
1975
1976static inline unsigned long vma_pages(struct vm_area_struct *vma)
1977{
1978 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
1979}
1980
1981
1982static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
1983 unsigned long vm_start, unsigned long vm_end)
1984{
1985 struct vm_area_struct *vma = find_vma(mm, vm_start);
1986
1987 if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
1988 vma = NULL;
1989
1990 return vma;
1991}
1992
1993#ifdef CONFIG_MMU
1994pgprot_t vm_get_page_prot(unsigned long vm_flags);
1995void vma_set_page_prot(struct vm_area_struct *vma);
1996#else
1997static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
1998{
1999 return __pgprot(0);
2000}
2001static inline void vma_set_page_prot(struct vm_area_struct *vma)
2002{
2003 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2004}
2005#endif
2006
2007#ifdef CONFIG_NUMA_BALANCING
2008unsigned long change_prot_numa(struct vm_area_struct *vma,
2009 unsigned long start, unsigned long end);
2010#endif
2011
2012struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
2013int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
2014 unsigned long pfn, unsigned long size, pgprot_t);
2015int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
2016int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2017 unsigned long pfn);
2018int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2019 unsigned long pfn);
2020int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
2021
2022
2023struct page *follow_page_mask(struct vm_area_struct *vma,
2024 unsigned long address, unsigned int foll_flags,
2025 unsigned int *page_mask);
2026
2027static inline struct page *follow_page(struct vm_area_struct *vma,
2028 unsigned long address, unsigned int foll_flags)
2029{
2030 unsigned int unused_page_mask;
2031 return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
2032}
2033
2034#define FOLL_WRITE 0x01
2035#define FOLL_TOUCH 0x02
2036#define FOLL_GET 0x04
2037#define FOLL_DUMP 0x08
2038#define FOLL_FORCE 0x10
2039#define FOLL_NOWAIT 0x20
2040
2041#define FOLL_MLOCK 0x40
2042#define FOLL_SPLIT 0x80
2043#define FOLL_HWPOISON 0x100
2044#define FOLL_NUMA 0x200
2045#define FOLL_MIGRATION 0x400
2046#define FOLL_TRIED 0x800
2047
2048typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
2049 void *data);
2050extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
2051 unsigned long size, pte_fn_t fn, void *data);
2052
2053#ifdef CONFIG_PROC_FS
2054void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
2055#else
2056static inline void vm_stat_account(struct mm_struct *mm,
2057 unsigned long flags, struct file *file, long pages)
2058{
2059 mm->total_vm += pages;
2060}
2061#endif
2062
2063#ifdef CONFIG_DEBUG_PAGEALLOC
2064extern bool _debug_pagealloc_enabled;
2065extern void __kernel_map_pages(struct page *page, int numpages, int enable);
2066
2067static inline bool debug_pagealloc_enabled(void)
2068{
2069 return _debug_pagealloc_enabled;
2070}
2071
2072static inline void
2073kernel_map_pages(struct page *page, int numpages, int enable)
2074{
2075 if (!debug_pagealloc_enabled())
2076 return;
2077
2078 __kernel_map_pages(page, numpages, enable);
2079}
2080#ifdef CONFIG_HIBERNATION
2081extern bool kernel_page_present(struct page *page);
2082#endif
2083#else
2084static inline void
2085kernel_map_pages(struct page *page, int numpages, int enable) {}
2086#ifdef CONFIG_HIBERNATION
2087static inline bool kernel_page_present(struct page *page) { return true; }
2088#endif
2089#endif
2090
2091#ifdef __HAVE_ARCH_GATE_AREA
2092extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
2093extern int in_gate_area_no_mm(unsigned long addr);
2094extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
2095#else
2096static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
2097{
2098 return NULL;
2099}
2100static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
2101static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
2102{
2103 return 0;
2104}
2105#endif
2106
2107#ifdef CONFIG_SYSCTL
2108extern int sysctl_drop_caches;
2109int drop_caches_sysctl_handler(struct ctl_table *, int,
2110 void __user *, size_t *, loff_t *);
2111#endif
2112
2113unsigned long shrink_node_slabs(gfp_t gfp_mask, int nid,
2114 unsigned long nr_scanned,
2115 unsigned long nr_eligible);
2116
2117#ifndef CONFIG_MMU
2118#define randomize_va_space 0
2119#else
2120extern int randomize_va_space;
2121#endif
2122
2123const char * arch_vma_name(struct vm_area_struct *vma);
2124void print_vma_addr(char *prefix, unsigned long rip);
2125
2126void sparse_mem_maps_populate_node(struct page **map_map,
2127 unsigned long pnum_begin,
2128 unsigned long pnum_end,
2129 unsigned long map_count,
2130 int nodeid);
2131
2132struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
2133pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
2134pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
2135pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
2136pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
2137void *vmemmap_alloc_block(unsigned long size, int node);
2138void *vmemmap_alloc_block_buf(unsigned long size, int node);
2139void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
2140int vmemmap_populate_basepages(unsigned long start, unsigned long end,
2141 int node);
2142int vmemmap_populate(unsigned long start, unsigned long end, int node);
2143void vmemmap_populate_print_last(void);
2144#ifdef CONFIG_MEMORY_HOTPLUG
2145void vmemmap_free(unsigned long start, unsigned long end);
2146#endif
2147void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
2148 unsigned long size);
2149
2150enum mf_flags {
2151 MF_COUNT_INCREASED = 1 << 0,
2152 MF_ACTION_REQUIRED = 1 << 1,
2153 MF_MUST_KILL = 1 << 2,
2154 MF_SOFT_OFFLINE = 1 << 3,
2155};
2156extern int memory_failure(unsigned long pfn, int trapno, int flags);
2157extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
2158extern int unpoison_memory(unsigned long pfn);
2159extern int sysctl_memory_failure_early_kill;
2160extern int sysctl_memory_failure_recovery;
2161extern void shake_page(struct page *p, int access);
2162extern atomic_long_t num_poisoned_pages;
2163extern int soft_offline_page(struct page *page, int flags);
2164
2165#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
2166extern void clear_huge_page(struct page *page,
2167 unsigned long addr,
2168 unsigned int pages_per_huge_page);
2169extern void copy_user_huge_page(struct page *dst, struct page *src,
2170 unsigned long addr, struct vm_area_struct *vma,
2171 unsigned int pages_per_huge_page);
2172#endif
2173
2174extern struct page_ext_operations debug_guardpage_ops;
2175extern struct page_ext_operations page_poisoning_ops;
2176
2177#ifdef CONFIG_DEBUG_PAGEALLOC
2178extern unsigned int _debug_guardpage_minorder;
2179extern bool _debug_guardpage_enabled;
2180
2181static inline unsigned int debug_guardpage_minorder(void)
2182{
2183 return _debug_guardpage_minorder;
2184}
2185
2186static inline bool debug_guardpage_enabled(void)
2187{
2188 return _debug_guardpage_enabled;
2189}
2190
2191static inline bool page_is_guard(struct page *page)
2192{
2193 struct page_ext *page_ext;
2194
2195 if (!debug_guardpage_enabled())
2196 return false;
2197
2198 page_ext = lookup_page_ext(page);
2199 return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
2200}
2201#else
2202static inline unsigned int debug_guardpage_minorder(void) { return 0; }
2203static inline bool debug_guardpage_enabled(void) { return false; }
2204static inline bool page_is_guard(struct page *page) { return false; }
2205#endif
2206
2207#if MAX_NUMNODES > 1
2208void __init setup_nr_node_ids(void);
2209#else
2210static inline void setup_nr_node_ids(void) {}
2211#endif
2212
2213#endif
2214#endif
2215