#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;
struct bdi_writeback;

34#ifndef CONFIG_NEED_MULTIPLE_NODES
35extern unsigned long max_mapnr;
36
37static inline void set_max_mapnr(unsigned long limit)
38{
39 max_mapnr = limit;
40}
41#else
42static inline void set_max_mapnr(unsigned long limit) { }
43#endif
44
45extern unsigned long totalram_pages;
46extern void * high_memory;
47extern int page_cluster;
48
49#ifdef CONFIG_SYSCTL
50extern int sysctl_legacy_va_layout;
51#else
52#define sysctl_legacy_va_layout 0
53#endif
54
55#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
56extern const int mmap_rnd_bits_min;
57extern const int mmap_rnd_bits_max;
58extern int mmap_rnd_bits __read_mostly;
59#endif
60#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
61extern const int mmap_rnd_compat_bits_min;
62extern const int mmap_rnd_compat_bits_max;
63extern int mmap_rnd_compat_bits __read_mostly;
64#endif
65
66#include <asm/page.h>
67#include <asm/pgtable.h>
68#include <asm/processor.h>
69
70#ifndef __pa_symbol
71#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
72#endif
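/*
 * To prevent common memory management code establishing a zero page
 * mapping on a read fault, an architecture can define
 * mm_forbids_zeropage(mm) in <asm/pgtable.h> (s390 does this to avoid
 * multiplexing hardware bits related to the physical page under
 * virtualization).  The generic fallback below never forbids it.
 */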
81#ifndef mm_forbids_zeropage
82#define mm_forbids_zeropage(X) (0)
83#endif
84
85extern unsigned long sysctl_user_reserve_kbytes;
86extern unsigned long sysctl_admin_reserve_kbytes;
87
88extern int sysctl_overcommit_memory;
89extern int sysctl_overcommit_ratio;
90extern unsigned long sysctl_overcommit_kbytes;
91
92extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
93 size_t *, loff_t *);
94extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
95 size_t *, loff_t *);
96
97#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)addr, PAGE_SIZE)
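/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */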
114extern struct kmem_cache *vm_area_cachep;
115
116#ifndef CONFIG_MMU
117extern struct rb_root nommu_region_tree;
118extern struct rw_semaphore nommu_region_sem;
119
120extern unsigned int kobjsize(const void *objp);
121#endif
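/*
 * vm_flags in vm_area_struct, see mm_types.h.
 */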
126#define VM_NONE 0x00000000
127
128#define VM_READ 0x00000001
129#define VM_WRITE 0x00000002
130#define VM_EXEC 0x00000004
131#define VM_SHARED 0x00000008
132
133
134#define VM_MAYREAD 0x00000010
135#define VM_MAYWRITE 0x00000020
136#define VM_MAYEXEC 0x00000040
137#define VM_MAYSHARE 0x00000080
138
139#define VM_GROWSDOWN 0x00000100
140#define VM_UFFD_MISSING 0x00000200
141#define VM_PFNMAP 0x00000400
142#define VM_DENYWRITE 0x00000800
143#define VM_UFFD_WP 0x00001000
144
145#define VM_LOCKED 0x00002000
146#define VM_IO 0x00004000
147
148
149#define VM_SEQ_READ 0x00008000
150#define VM_RAND_READ 0x00010000
151
152#define VM_DONTCOPY 0x00020000
153#define VM_DONTEXPAND 0x00040000
154#define VM_LOCKONFAULT 0x00080000
155#define VM_ACCOUNT 0x00100000
156#define VM_NORESERVE 0x00200000
157#define VM_HUGETLB 0x00400000
158#define VM_ARCH_1 0x01000000
159#define VM_ARCH_2 0x02000000
160#define VM_DONTDUMP 0x04000000
161
162#ifdef CONFIG_MEM_SOFT_DIRTY
163# define VM_SOFTDIRTY 0x08000000
164#else
165# define VM_SOFTDIRTY 0
166#endif
167
168#define VM_MIXEDMAP 0x10000000
169#define VM_HUGEPAGE 0x20000000
170#define VM_NOHUGEPAGE 0x40000000
171#define VM_MERGEABLE 0x80000000
172
173#if defined(CONFIG_X86)
174# define VM_PAT VM_ARCH_1
175#elif defined(CONFIG_PPC)
176# define VM_SAO VM_ARCH_1
177#elif defined(CONFIG_PARISC)
178# define VM_GROWSUP VM_ARCH_1
179#elif defined(CONFIG_METAG)
180# define VM_GROWSUP VM_ARCH_1
181#elif defined(CONFIG_IA64)
182# define VM_GROWSUP VM_ARCH_1
183#elif !defined(CONFIG_MMU)
184# define VM_MAPPED_COPY VM_ARCH_1
185#endif
186
187#if defined(CONFIG_X86)
188
189# define VM_MPX VM_ARCH_2
190#endif
191
192#ifndef VM_GROWSUP
193# define VM_GROWSUP VM_NONE
194#endif
195
196
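/* Bits set in the VMA until the stack is in its final location */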
197#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ)
198
199#ifndef VM_STACK_DEFAULT_FLAGS
200#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
201#endif
202
203#ifdef CONFIG_STACK_GROWSUP
204#define VM_STACK VM_GROWSUP
205#else
206#define VM_STACK VM_GROWSDOWN
207#endif
208
209#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
210
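/*
 * Special vmas that are non-mergable, non-mlock()able.
 * Note: mm/gup.c depends on this definition.
 */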
215#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
216
217
218#define VM_INIT_DEF_MASK VM_NOHUGEPAGE
219
220
221#define VM_LOCKED_CLEAR_MASK (~(VM_LOCKED | VM_LOCKONFAULT))
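/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask.
 */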
227extern pgprot_t protection_map[16];
228
229#define FAULT_FLAG_WRITE 0x01
230#define FAULT_FLAG_MKWRITE 0x02
231#define FAULT_FLAG_ALLOW_RETRY 0x04
232#define FAULT_FLAG_RETRY_NOWAIT 0x08
233#define FAULT_FLAG_KILLABLE 0x10
234#define FAULT_FLAG_TRIED 0x20
235#define FAULT_FLAG_USER 0x40
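/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * MM layer fills up gfp_mask for page allocations but fault handler might
 * override it.  If that happens, original gfp_mask will be used instead.
 */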
struct vm_fault {
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	gfp_t gfp_mask;			/* gfp mask to be used for allocations */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	void __user *virtual_address;	/* Faulting virtual address */

	struct page *cow_page;		/* Handler may choose to COW */
	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
	/* for ->map_pages() only */
	pgoff_t max_pgoff;		/* map pages for offset from pgoff till
					 * max_pgoff inclusive */
	pte_t *pte;			/* pte entry associated with ->pgoff */
};
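/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */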
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*mremap)(struct vm_area_struct * area);
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
	int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
						pmd_t *, unsigned int flags);
	void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	int (*pfn_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs that can switch between memory and hardware
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to the surrounding context--i.e. do
	 * not install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add a reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED; other vma policies are protected
	 * by the mmap_sem and need no extra reference.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
#endif
	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr.
	 */
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};
328
329struct mmu_gather;
330struct inode;
331
332#define page_private(page) ((page)->private)
333#define set_page_private(page, v) ((page)->private = (v))
334
335#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
336static inline int pmd_devmap(pmd_t pmd)
337{
338 return 0;
339}
340#endif
341
346#include <linux/page-flags.h>
347#include <linux/huge_mm.h>
348
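/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */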
365static inline int put_page_testzero(struct page *page)
366{
367 VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page);
368 return atomic_dec_and_test(&page->_count);
369}
370
371
372
373
374
375
376
377static inline int get_page_unless_zero(struct page *page)
378{
379 return atomic_inc_not_zero(&page->_count);
380}
381
382extern int page_is_ram(unsigned long pfn);
383
384enum {
385 REGION_INTERSECTS,
386 REGION_DISJOINT,
387 REGION_MIXED,
388};
389
390int region_intersects(resource_size_t offset, size_t size, const char *type);
391
392
393struct page *vmalloc_to_page(const void *addr);
394unsigned long vmalloc_to_pfn(const void *addr);
395
396
397
398
399
400
401
402static inline int is_vmalloc_addr(const void *x)
403{
404#ifdef CONFIG_MMU
405 unsigned long addr = (unsigned long)x;
406
407 return addr >= VMALLOC_START && addr < VMALLOC_END;
408#else
409 return 0;
410#endif
411}
412#ifdef CONFIG_MMU
413extern int is_vmalloc_or_module_addr(const void *x);
414#else
415static inline int is_vmalloc_or_module_addr(const void *x)
416{
417 return 0;
418}
419#endif
420
421extern void kvfree(const void *addr);
422
423static inline atomic_t *compound_mapcount_ptr(struct page *page)
424{
425 return &page[1].compound_mapcount;
426}
427
428static inline int compound_mapcount(struct page *page)
429{
430 if (!PageCompound(page))
431 return 0;
432 page = compound_head(page);
433 return atomic_read(compound_mapcount_ptr(page)) + 1;
434}
435
436
437
438
439
440
441static inline void page_mapcount_reset(struct page *page)
442{
443 atomic_set(&(page)->_mapcount, -1);
444}
445
446int __page_mapcount(struct page *page);
447
448static inline int page_mapcount(struct page *page)
449{
450 VM_BUG_ON_PAGE(PageSlab(page), page);
451
452 if (unlikely(PageCompound(page)))
453 return __page_mapcount(page);
454 return atomic_read(&page->_mapcount) + 1;
455}
456
457#ifdef CONFIG_TRANSPARENT_HUGEPAGE
458int total_mapcount(struct page *page);
459#else
460static inline int total_mapcount(struct page *page)
461{
462 return page_mapcount(page);
463}
464#endif
465
466static inline int page_count(struct page *page)
467{
468 return atomic_read(&compound_head(page)->_count);
469}
470
471static inline struct page *virt_to_head_page(const void *x)
472{
473 struct page *page = virt_to_page(x);
474
475 return compound_head(page);
476}
477
478
479
480
481
482static inline void init_page_count(struct page *page)
483{
484 atomic_set(&page->_count, 1);
485}
486
487void __put_page(struct page *page);
488
489void put_pages_list(struct list_head *pages);
490
491void split_page(struct page *page, unsigned int order);
492int split_free_page(struct page *page);
493
494
495
496
497
498
499typedef void compound_page_dtor(struct page *);
500
501
502enum compound_dtor_id {
503 NULL_COMPOUND_DTOR,
504 COMPOUND_PAGE_DTOR,
505#ifdef CONFIG_HUGETLB_PAGE
506 HUGETLB_PAGE_DTOR,
507#endif
508#ifdef CONFIG_TRANSPARENT_HUGEPAGE
509 TRANSHUGE_PAGE_DTOR,
510#endif
511 NR_COMPOUND_DTORS,
512};
513extern compound_page_dtor * const compound_page_dtors[];
514
515static inline void set_compound_page_dtor(struct page *page,
516 enum compound_dtor_id compound_dtor)
517{
518 VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
519 page[1].compound_dtor = compound_dtor;
520}
521
522static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
523{
524 VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
525 return compound_page_dtors[page[1].compound_dtor];
526}
527
528static inline unsigned int compound_order(struct page *page)
529{
530 if (!PageHead(page))
531 return 0;
532 return page[1].compound_order;
533}
534
535static inline void set_compound_order(struct page *page, unsigned int order)
536{
537 page[1].compound_order = order;
538}
539
540void free_compound_page(struct page *page);
541
542#ifdef CONFIG_MMU
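/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case we always want
 * pte_mkwrite, but get_user_pages() can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm().
 */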
549static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
550{
551 if (likely(vma->vm_flags & VM_WRITE))
552 pte = pte_mkwrite(pte);
553 return pte;
554}
555
556void do_set_pte(struct vm_area_struct *vma, unsigned long address,
557 struct page *page, pte_t *pte, bool write, bool anon);
558#endif
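/*
 * The zone field, plus (depending on the memory model and NUMA config)
 * the section, node and last-cpupid fields, are packed into the upper
 * bits of page->flags.  The *_PGOFF values below give each field's
 * offset from the top of the word, and the *_PGSHIFT values the shift
 * needed to extract it; unused fields have zero width and compile away.
 */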
626#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
627#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
628#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
629#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
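/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */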
636#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
637#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
638#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
639#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
640
641
642#ifdef NODE_NOT_IN_PAGE_FLAGS
643#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
644#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF)? \
645 SECTIONS_PGOFF : ZONES_PGOFF)
646#else
647#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT)
648#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF)? \
649 NODES_PGOFF : ZONES_PGOFF)
650#endif
651
652#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
653
654#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
655#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
656#endif
657
658#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
659#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
660#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
661#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1)
662#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
663
664static inline enum zone_type page_zonenum(const struct page *page)
665{
666 return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
667}
668
669#ifdef CONFIG_ZONE_DEVICE
670void get_zone_device_page(struct page *page);
671void put_zone_device_page(struct page *page);
672static inline bool is_zone_device_page(const struct page *page)
673{
674 return page_zonenum(page) == ZONE_DEVICE;
675}
676#else
677static inline void get_zone_device_page(struct page *page)
678{
679}
680static inline void put_zone_device_page(struct page *page)
681{
682}
683static inline bool is_zone_device_page(const struct page *page)
684{
685 return false;
686}
687#endif
688
static inline void get_page(struct page *page)
{
	page = compound_head(page);

	/*
	 * Getting a normal page or the head of a compound page
	 * requires an already elevated page->_count.
	 */
	VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
	atomic_inc(&page->_count);

	if (unlikely(is_zone_device_page(page)))
		get_zone_device_page(page);
}
702
703static inline void put_page(struct page *page)
704{
705 page = compound_head(page);
706
707 if (put_page_testzero(page))
708 __put_page(page);
709
710 if (unlikely(is_zone_device_page(page)))
711 put_zone_device_page(page);
712}
713
714#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
715#define SECTION_IN_PAGE_FLAGS
716#endif
717
726static inline int page_zone_id(struct page *page)
727{
728 return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
729}
730
731static inline int zone_to_nid(struct zone *zone)
732{
733#ifdef CONFIG_NUMA
734 return zone->node;
735#else
736 return 0;
737#endif
738}
739
740#ifdef NODE_NOT_IN_PAGE_FLAGS
741extern int page_to_nid(const struct page *page);
742#else
743static inline int page_to_nid(const struct page *page)
744{
745 return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
746}
747#endif
748
749#ifdef CONFIG_NUMA_BALANCING
750static inline int cpu_pid_to_cpupid(int cpu, int pid)
751{
752 return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
753}
754
755static inline int cpupid_to_pid(int cpupid)
756{
757 return cpupid & LAST__PID_MASK;
758}
759
760static inline int cpupid_to_cpu(int cpupid)
761{
762 return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
763}
764
765static inline int cpupid_to_nid(int cpupid)
766{
767 return cpu_to_node(cpupid_to_cpu(cpupid));
768}
769
770static inline bool cpupid_pid_unset(int cpupid)
771{
772 return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
773}
774
775static inline bool cpupid_cpu_unset(int cpupid)
776{
777 return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
778}
779
780static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
781{
782 return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
783}
784
785#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
786#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
787static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
788{
789 return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
790}
791
792static inline int page_cpupid_last(struct page *page)
793{
794 return page->_last_cpupid;
795}
796static inline void page_cpupid_reset_last(struct page *page)
797{
798 page->_last_cpupid = -1 & LAST_CPUPID_MASK;
799}
800#else
801static inline int page_cpupid_last(struct page *page)
802{
803 return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
804}
805
806extern int page_cpupid_xchg_last(struct page *page, int cpupid);
807
808static inline void page_cpupid_reset_last(struct page *page)
809{
810 int cpupid = (1 << LAST_CPUPID_SHIFT) - 1;
811
812 page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
813 page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
814}
815#endif
816#else
817static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
818{
819 return page_to_nid(page);
820}
821
822static inline int page_cpupid_last(struct page *page)
823{
824 return page_to_nid(page);
825}
826
827static inline int cpupid_to_nid(int cpupid)
828{
829 return -1;
830}
831
832static inline int cpupid_to_pid(int cpupid)
833{
834 return -1;
835}
836
837static inline int cpupid_to_cpu(int cpupid)
838{
839 return -1;
840}
841
842static inline int cpu_pid_to_cpupid(int nid, int pid)
843{
844 return -1;
845}
846
847static inline bool cpupid_pid_unset(int cpupid)
848{
849 return 1;
850}
851
852static inline void page_cpupid_reset_last(struct page *page)
853{
854}
855
856static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
857{
858 return false;
859}
860#endif
861
862static inline struct zone *page_zone(const struct page *page)
863{
864 return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
865}
866
867#ifdef SECTION_IN_PAGE_FLAGS
868static inline void set_page_section(struct page *page, unsigned long section)
869{
870 page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
871 page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
872}
873
874static inline unsigned long page_to_section(const struct page *page)
875{
876 return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
877}
878#endif
879
880static inline void set_page_zone(struct page *page, enum zone_type zone)
881{
882 page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
883 page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
884}
885
886static inline void set_page_node(struct page *page, unsigned long node)
887{
888 page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
889 page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
890}
891
892static inline void set_page_links(struct page *page, enum zone_type zone,
893 unsigned long node, unsigned long pfn)
894{
895 set_page_zone(page, zone);
896 set_page_node(page, node);
897#ifdef SECTION_IN_PAGE_FLAGS
898 set_page_section(page, pfn_to_section_nr(pfn));
899#endif
900}
901
902#ifdef CONFIG_MEMCG
903static inline struct mem_cgroup *page_memcg(struct page *page)
904{
905 return page->mem_cgroup;
906}
907
908static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
909{
910 page->mem_cgroup = memcg;
911}
912#else
913static inline struct mem_cgroup *page_memcg(struct page *page)
914{
915 return NULL;
916}
917
918static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
919{
920}
921#endif
922
923
924
925
926#include <linux/vmstat.h>
927
928static __always_inline void *lowmem_page_address(const struct page *page)
929{
930 return __va(PFN_PHYS(page_to_pfn(page)));
931}
932
933#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
934#define HASHED_PAGE_VIRTUAL
935#endif
936
937#if defined(WANT_PAGE_VIRTUAL)
938static inline void *page_address(const struct page *page)
939{
940 return page->virtual;
941}
942static inline void set_page_address(struct page *page, void *address)
943{
944 page->virtual = address;
945}
946#define page_address_init() do { } while(0)
947#endif
948
949#if defined(HASHED_PAGE_VIRTUAL)
950void *page_address(const struct page *page);
951void set_page_address(struct page *page, void *virtual);
952void page_address_init(void);
953#endif
954
955#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
956#define page_address(page) lowmem_page_address(page)
957#define set_page_address(page, address) do { } while(0)
958#define page_address_init() do { } while(0)
959#endif
960
961extern void *page_rmapping(struct page *page);
962extern struct anon_vma *page_anon_vma(struct page *page);
963extern struct address_space *page_mapping(struct page *page);
964
965extern struct address_space *__page_file_mapping(struct page *);
966
967static inline
968struct address_space *page_file_mapping(struct page *page)
969{
970 if (unlikely(PageSwapCache(page)))
971 return __page_file_mapping(page);
972
973 return page->mapping;
974}
975
976
977
978
979
980static inline pgoff_t page_index(struct page *page)
981{
982 if (unlikely(PageSwapCache(page)))
983 return page_private(page);
984 return page->index;
985}
986
987extern pgoff_t __page_file_index(struct page *page);
988
989
990
991
992
993static inline pgoff_t page_file_index(struct page *page)
994{
995 if (unlikely(PageSwapCache(page)))
996 return __page_file_index(page);
997
998 return page->index;
999}
1000
1001
1002
1003
1004
1005static inline bool page_mapped(struct page *page)
1006{
1007 int i;
1008 if (likely(!PageCompound(page)))
1009 return atomic_read(&page->_mapcount) >= 0;
1010 page = compound_head(page);
1011 if (atomic_read(compound_mapcount_ptr(page)) >= 0)
1012 return true;
1013 for (i = 0; i < hpage_nr_pages(page); i++) {
1014 if (atomic_read(&page[i]._mapcount) >= 0)
1015 return true;
1016 }
1017 return false;
1018}
1019
/*
 * Return true only if the page has been allocated with
 * ALLOC_NO_WATERMARKS and the low watermark was not
 * met implying that the system is under some pressure.
 */
static inline bool page_is_pfmemalloc(struct page *page)
{
	/*
	 * Page index cannot be this large so this must be
	 * a pfmemalloc page.
	 */
	return page->index == -1UL;
}

/*
 * Only to be called by the page allocator on a freshly allocated
 * page.
 */
static inline void set_page_pfmemalloc(struct page *page)
{
	page->index = -1UL;
}

static inline void clear_page_pfmemalloc(struct page *page)
{
	page->index = 0;
}
1047
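/*
 * Different kinds of faults, as returned by handle_mm_fault().
 * Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 */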
1054#define VM_FAULT_MINOR 0
1055
1056#define VM_FAULT_OOM 0x0001
1057#define VM_FAULT_SIGBUS 0x0002
1058#define VM_FAULT_MAJOR 0x0004
1059#define VM_FAULT_WRITE 0x0008
1060#define VM_FAULT_HWPOISON 0x0010
1061#define VM_FAULT_HWPOISON_LARGE 0x0020
1062#define VM_FAULT_SIGSEGV 0x0040
1063
1064#define VM_FAULT_NOPAGE 0x0100
1065#define VM_FAULT_LOCKED 0x0200
1066#define VM_FAULT_RETRY 0x0400
1067#define VM_FAULT_FALLBACK 0x0800
1068
1069#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000
1070
1071#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
1072 VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
1073 VM_FAULT_FALLBACK)
1074
1075
1076#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
1077#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
1078
1079
1080
1081
1082extern void pagefault_out_of_memory(void);
1083
1084#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
1085
/*
 * Flags passed to show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
1090#define SHOW_MEM_FILTER_NODES (0x0001u)
1091
1092extern void show_free_areas(unsigned int flags);
1093extern bool skip_free_areas_node(unsigned int flags, int nid);
1094
1095int shmem_zero_setup(struct vm_area_struct *);
1096#ifdef CONFIG_SHMEM
1097bool shmem_mapping(struct address_space *mapping);
1098#else
1099static inline bool shmem_mapping(struct address_space *mapping)
1100{
1101 return false;
1102}
1103#endif
1104
1105extern bool can_do_mlock(void);
1106extern int user_shm_lock(size_t, struct user_struct *);
1107extern void user_shm_unlock(size_t, struct user_struct *);
1108
1109
1110
1111
1112struct zap_details {
1113 struct address_space *check_mapping;
1114 pgoff_t first_index;
1115 pgoff_t last_index;
1116};
1117
1118struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
1119 pte_t pte);
1120
1121int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1122 unsigned long size);
1123void zap_page_range(struct vm_area_struct *vma, unsigned long address,
1124 unsigned long size, struct zap_details *);
1125void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
1126 unsigned long start, unsigned long end);
1127
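/**
 * mm_walk - callbacks for walk_page_range()
 * @pmd_entry:	   if set, called for each non-empty PMD (3rd-level) entry;
 *		   the handler must be able to deal with transparent huge pmds
 * @pte_entry:	   if set, called for each non-empty PTE (4th-level) entry
 * @pte_hole:	   if set, called for each hole at all levels
 * @hugetlb_entry: if set, called for each hugetlb entry
 * @test_walk:	   caller-specific callback deciding whether to walk the
 *		   current vma: 0 means walk it, 1 means skip it, and a
 *		   negative value aborts the whole walk
 * @mm:		   mm_struct of the target address space
 * @vma:	   vma currently being walked (NULL when outside any vma)
 * @private:	   private data for the callbacks' use
 */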
1148struct mm_walk {
1149 int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
1150 unsigned long next, struct mm_walk *walk);
1151 int (*pte_entry)(pte_t *pte, unsigned long addr,
1152 unsigned long next, struct mm_walk *walk);
1153 int (*pte_hole)(unsigned long addr, unsigned long next,
1154 struct mm_walk *walk);
1155 int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
1156 unsigned long addr, unsigned long next,
1157 struct mm_walk *walk);
1158 int (*test_walk)(unsigned long addr, unsigned long next,
1159 struct mm_walk *walk);
1160 struct mm_struct *mm;
1161 struct vm_area_struct *vma;
1162 void *private;
1163};
1164
1165int walk_page_range(unsigned long addr, unsigned long end,
1166 struct mm_walk *walk);
1167int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
1168void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
1169 unsigned long end, unsigned long floor, unsigned long ceiling);
1170int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
1171 struct vm_area_struct *vma);
1172void unmap_mapping_range(struct address_space *mapping,
1173 loff_t const holebegin, loff_t const holelen, int even_cows);
1174int follow_pfn(struct vm_area_struct *vma, unsigned long address,
1175 unsigned long *pfn);
1176int follow_phys(struct vm_area_struct *vma, unsigned long address,
1177 unsigned int flags, unsigned long *prot, resource_size_t *phys);
1178int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
1179 void *buf, int len, int write);
1180
1181static inline void unmap_shared_mapping_range(struct address_space *mapping,
1182 loff_t const holebegin, loff_t const holelen)
1183{
1184 unmap_mapping_range(mapping, holebegin, holelen, 0);
1185}
1186
1187extern void truncate_pagecache(struct inode *inode, loff_t new);
1188extern void truncate_setsize(struct inode *inode, loff_t newsize);
1189void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
1190void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
1191int truncate_inode_page(struct address_space *mapping, struct page *page);
1192int generic_error_remove_page(struct address_space *mapping, struct page *page);
1193int invalidate_inode_page(struct page *page);
1194
1195#ifdef CONFIG_MMU
1196extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
1197 unsigned long address, unsigned int flags);
1198extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
1199 unsigned long address, unsigned int fault_flags,
1200 bool *unlocked);
1201#else
1202static inline int handle_mm_fault(struct mm_struct *mm,
1203 struct vm_area_struct *vma, unsigned long address,
1204 unsigned int flags)
1205{
1206
1207 BUG();
1208 return VM_FAULT_SIGBUS;
1209}
1210static inline int fixup_user_fault(struct task_struct *tsk,
1211 struct mm_struct *mm, unsigned long address,
1212 unsigned int fault_flags, bool *unlocked)
1213{
1214
1215 BUG();
1216 return -EFAULT;
1217}
1218#endif
1219
1220extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
1221extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1222 void *buf, int len, int write);
1223
1224long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1225 unsigned long start, unsigned long nr_pages,
1226 unsigned int foll_flags, struct page **pages,
1227 struct vm_area_struct **vmas, int *nonblocking);
1228long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1229 unsigned long start, unsigned long nr_pages,
1230 int write, int force, struct page **pages,
1231 struct vm_area_struct **vmas);
1232long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
1233 unsigned long start, unsigned long nr_pages,
1234 int write, int force, struct page **pages,
1235 int *locked);
1236long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
1237 unsigned long start, unsigned long nr_pages,
1238 int write, int force, struct page **pages,
1239 unsigned int gup_flags);
1240long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
1241 unsigned long start, unsigned long nr_pages,
1242 int write, int force, struct page **pages);
1243int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1244 struct page **pages);
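/*
 * A minimal usage sketch for the get_user_pages() family above
 * (illustrative only; error handling elided, pages[] sized by the caller):
 *
 *	down_read(&mm->mmap_sem);
 *	ret = get_user_pages(current, mm, start, nr_pages, 0, 0, pages, NULL);
 *	up_read(&mm->mmap_sem);
 *	for (i = 0; i < ret; i++)
 *		put_page(pages[i]);
 */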
1245

/* Container for pinned pfns / pages */
struct frame_vector {
	unsigned int nr_allocated;	/* Number of frames we have space for */
	unsigned int nr_frames;	/* Number of frames stored in ptrs array */
	bool got_ref;		/* Did we pin pages by getting page ref? */
	bool is_pfns;		/* Does array contain pages or pfns? */
	void *ptrs[0];		/* Array of pinned pfns / pages. Use
				 * frame_vector_pages() or frame_vector_pfns()
				 * for access. */
};
1256
1257struct frame_vector *frame_vector_create(unsigned int nr_frames);
1258void frame_vector_destroy(struct frame_vector *vec);
1259int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
1260 bool write, bool force, struct frame_vector *vec);
1261void put_vaddr_frames(struct frame_vector *vec);
1262int frame_vector_to_pages(struct frame_vector *vec);
1263void frame_vector_to_pfns(struct frame_vector *vec);
1264
1265static inline unsigned int frame_vector_count(struct frame_vector *vec)
1266{
1267 return vec->nr_frames;
1268}
1269
1270static inline struct page **frame_vector_pages(struct frame_vector *vec)
1271{
1272 if (vec->is_pfns) {
1273 int err = frame_vector_to_pages(vec);
1274
1275 if (err)
1276 return ERR_PTR(err);
1277 }
1278 return (struct page **)(vec->ptrs);
1279}
1280
1281static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
1282{
1283 if (!vec->is_pfns)
1284 frame_vector_to_pfns(vec);
1285 return (unsigned long *)(vec->ptrs);
1286}
1287
1288struct kvec;
1289int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
1290 struct page **pages);
1291int get_kernel_page(unsigned long start, int write, struct page **pages);
1292struct page *get_dump_page(unsigned long addr);
1293
1294extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
1295extern void do_invalidatepage(struct page *page, unsigned int offset,
1296 unsigned int length);
1297
1298int __set_page_dirty_nobuffers(struct page *page);
1299int __set_page_dirty_no_writeback(struct page *page);
1300int redirty_page_for_writepage(struct writeback_control *wbc,
1301 struct page *page);
1302void account_page_dirtied(struct page *page, struct address_space *mapping,
1303 struct mem_cgroup *memcg);
1304void account_page_cleaned(struct page *page, struct address_space *mapping,
1305 struct mem_cgroup *memcg, struct bdi_writeback *wb);
1306int set_page_dirty(struct page *page);
1307int set_page_dirty_lock(struct page *page);
1308void cancel_dirty_page(struct page *page);
1309int clear_page_dirty_for_io(struct page *page);
1310
1311int get_cmdline(struct task_struct *task, char *buffer, int buflen);
1312
1313
1314static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
1315{
1316 return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
1317}
1318
1319static inline bool vma_is_anonymous(struct vm_area_struct *vma)
1320{
1321 return !vma->vm_ops;
1322}
1323
1324static inline int stack_guard_page_start(struct vm_area_struct *vma,
1325 unsigned long addr)
1326{
1327 return (vma->vm_flags & VM_GROWSDOWN) &&
1328 (vma->vm_start == addr) &&
1329 !vma_growsdown(vma->vm_prev, addr);
1330}
1331
1332
1333static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
1334{
1335 return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
1336}
1337
1338static inline int stack_guard_page_end(struct vm_area_struct *vma,
1339 unsigned long addr)
1340{
1341 return (vma->vm_flags & VM_GROWSUP) &&
1342 (vma->vm_end == addr) &&
1343 !vma_growsup(vma->vm_next, addr);
1344}
1345
1346int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);
1347
1348extern unsigned long move_page_tables(struct vm_area_struct *vma,
1349 unsigned long old_addr, struct vm_area_struct *new_vma,
1350 unsigned long new_addr, unsigned long len,
1351 bool need_rmap_locks);
1352extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
1353 unsigned long end, pgprot_t newprot,
1354 int dirty_accountable, int prot_numa);
1355extern int mprotect_fixup(struct vm_area_struct *vma,
1356 struct vm_area_struct **pprev, unsigned long start,
1357 unsigned long end, unsigned long newflags);
1358
1359
1360
1361
1362int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
1363 struct page **pages);
1364
1365
1366
1367static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
1368{
1369 long val = atomic_long_read(&mm->rss_stat.count[member]);
1370
1371#ifdef SPLIT_RSS_COUNTING
	/*
	 * The counter is updated asynchronously and may transiently go
	 * negative; never report a negative value to callers.
	 */
1376 if (val < 0)
1377 val = 0;
1378#endif
1379 return (unsigned long)val;
1380}
1381
1382static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
1383{
1384 atomic_long_add(value, &mm->rss_stat.count[member]);
1385}
1386
1387static inline void inc_mm_counter(struct mm_struct *mm, int member)
1388{
1389 atomic_long_inc(&mm->rss_stat.count[member]);
1390}
1391
1392static inline void dec_mm_counter(struct mm_struct *mm, int member)
1393{
1394 atomic_long_dec(&mm->rss_stat.count[member]);
1395}
1396
1397
1398static inline int mm_counter_file(struct page *page)
1399{
1400 if (PageSwapBacked(page))
1401 return MM_SHMEMPAGES;
1402 return MM_FILEPAGES;
1403}
1404
1405static inline int mm_counter(struct page *page)
1406{
1407 if (PageAnon(page))
1408 return MM_ANONPAGES;
1409 return mm_counter_file(page);
1410}
1411
1412static inline unsigned long get_mm_rss(struct mm_struct *mm)
1413{
1414 return get_mm_counter(mm, MM_FILEPAGES) +
1415 get_mm_counter(mm, MM_ANONPAGES) +
1416 get_mm_counter(mm, MM_SHMEMPAGES);
1417}
1418
1419static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
1420{
1421 return max(mm->hiwater_rss, get_mm_rss(mm));
1422}
1423
1424static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
1425{
1426 return max(mm->hiwater_vm, mm->total_vm);
1427}
1428
1429static inline void update_hiwater_rss(struct mm_struct *mm)
1430{
1431 unsigned long _rss = get_mm_rss(mm);
1432
1433 if ((mm)->hiwater_rss < _rss)
1434 (mm)->hiwater_rss = _rss;
1435}
1436
1437static inline void update_hiwater_vm(struct mm_struct *mm)
1438{
1439 if (mm->hiwater_vm < mm->total_vm)
1440 mm->hiwater_vm = mm->total_vm;
1441}
1442
1443static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
1444{
1445 mm->hiwater_rss = get_mm_rss(mm);
1446}
1447
1448static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
1449 struct mm_struct *mm)
1450{
1451 unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
1452
1453 if (*maxrss < hiwater_rss)
1454 *maxrss = hiwater_rss;
1455}
1456
1457#if defined(SPLIT_RSS_COUNTING)
1458void sync_mm_rss(struct mm_struct *mm);
1459#else
1460static inline void sync_mm_rss(struct mm_struct *mm)
1461{
1462}
1463#endif
1464
1465#ifndef __HAVE_ARCH_PTE_DEVMAP
1466static inline int pte_devmap(pte_t pte)
1467{
1468 return 0;
1469}
1470#endif
1471
1472int vma_wants_writenotify(struct vm_area_struct *vma);
1473
1474extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1475 spinlock_t **ptl);
1476static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
1477 spinlock_t **ptl)
1478{
1479 pte_t *ptep;
1480 __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
1481 return ptep;
1482}
1483
1484#ifdef __PAGETABLE_PUD_FOLDED
1485static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
1486 unsigned long address)
1487{
1488 return 0;
1489}
1490#else
1491int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
1492#endif
1493
1494#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
1495static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
1496 unsigned long address)
1497{
1498 return 0;
1499}
1500
1501static inline void mm_nr_pmds_init(struct mm_struct *mm) {}
1502
1503static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
1504{
1505 return 0;
1506}
1507
1508static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
1509static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
1510
1511#else
1512int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
1513
1514static inline void mm_nr_pmds_init(struct mm_struct *mm)
1515{
1516 atomic_long_set(&mm->nr_pmds, 0);
1517}
1518
1519static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
1520{
1521 return atomic_long_read(&mm->nr_pmds);
1522}
1523
1524static inline void mm_inc_nr_pmds(struct mm_struct *mm)
1525{
1526 atomic_long_inc(&mm->nr_pmds);
1527}
1528
1529static inline void mm_dec_nr_pmds(struct mm_struct *mm)
1530{
1531 atomic_long_dec(&mm->nr_pmds);
1532}
1533#endif
1534
1535int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
1536 pmd_t *pmd, unsigned long address);
1537int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
1538
/*
 * The following ifdefs are needed to make the 4level-fixup.h header work.
 * Remove them when 4level-fixup.h has been removed.
 */
1543#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
1544static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
1545{
1546 return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
1547 NULL: pud_offset(pgd, address);
1548}
1549
1550static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
1551{
1552 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
1553 NULL: pmd_offset(pud, address);
1554}
1555#endif
1556
1557#if USE_SPLIT_PTE_PTLOCKS
1558#if ALLOC_SPLIT_PTLOCKS
1559void __init ptlock_cache_init(void);
1560extern bool ptlock_alloc(struct page *page);
1561extern void ptlock_free(struct page *page);
1562
1563static inline spinlock_t *ptlock_ptr(struct page *page)
1564{
1565 return page->ptl;
1566}
1567#else
1568static inline void ptlock_cache_init(void)
1569{
1570}
1571
1572static inline bool ptlock_alloc(struct page *page)
1573{
1574 return true;
1575}
1576
1577static inline void ptlock_free(struct page *page)
1578{
1579}
1580
1581static inline spinlock_t *ptlock_ptr(struct page *page)
1582{
1583 return &page->ptl;
1584}
1585#endif
1586
1587static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1588{
1589 return ptlock_ptr(pmd_page(*pmd));
1590}
1591
1592static inline bool ptlock_init(struct page *page)
1593{
	/*
	 * prep_new_page() initializes page->private (and therefore page->ptl)
	 * with 0.  Make sure nobody took it in use in between.
	 */
1601 VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
1602 if (!ptlock_alloc(page))
1603 return false;
1604 spin_lock_init(ptlock_ptr(page));
1605 return true;
1606}
1607
1608
1609static inline void pte_lock_deinit(struct page *page)
1610{
1611 page->mapping = NULL;
1612 ptlock_free(page);
1613}
1614
1615#else
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
1619static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1620{
1621 return &mm->page_table_lock;
1622}
1623static inline void ptlock_cache_init(void) {}
1624static inline bool ptlock_init(struct page *page) { return true; }
1625static inline void pte_lock_deinit(struct page *page) {}
1626#endif
1627
1628static inline void pgtable_init(void)
1629{
1630 ptlock_cache_init();
1631 pgtable_cache_init();
1632}
1633
1634static inline bool pgtable_page_ctor(struct page *page)
1635{
1636 if (!ptlock_init(page))
1637 return false;
1638 inc_zone_page_state(page, NR_PAGETABLE);
1639 return true;
1640}
1641
1642static inline void pgtable_page_dtor(struct page *page)
1643{
1644 pte_lock_deinit(page);
1645 dec_zone_page_state(page, NR_PAGETABLE);
1646}
1647
1648#define pte_offset_map_lock(mm, pmd, address, ptlp) \
1649({ \
1650 spinlock_t *__ptl = pte_lockptr(mm, pmd); \
1651 pte_t *__pte = pte_offset_map(pmd, address); \
1652 *(ptlp) = __ptl; \
1653 spin_lock(__ptl); \
1654 __pte; \
1655})
1656
1657#define pte_unmap_unlock(pte, ptl) do { \
1658 spin_unlock(ptl); \
1659 pte_unmap(pte); \
1660} while (0)
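/*
 * Typical use of the two helpers above (a sketch; the pmd must already be
 * known to point to a page table):
 *
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	... examine or modify *pte ...
 *	pte_unmap_unlock(pte, ptl);
 */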
1661
1662#define pte_alloc_map(mm, vma, pmd, address) \
1663 ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma, \
1664 pmd, address))? \
1665 NULL: pte_offset_map(pmd, address))
1666
1667#define pte_alloc_map_lock(mm, pmd, address, ptlp) \
1668 ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL, \
1669 pmd, address))? \
1670 NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
1671
1672#define pte_alloc_kernel(pmd, address) \
1673 ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
1674 NULL: pte_offset_kernel(pmd, address))
1675
1676#if USE_SPLIT_PMD_PTLOCKS
1677
1678static struct page *pmd_to_page(pmd_t *pmd)
1679{
1680 unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
1681 return virt_to_page((void *)((unsigned long) pmd & mask));
1682}
1683
1684static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1685{
1686 return ptlock_ptr(pmd_to_page(pmd));
1687}
1688
1689static inline bool pgtable_pmd_page_ctor(struct page *page)
1690{
1691#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1692 page->pmd_huge_pte = NULL;
1693#endif
1694 return ptlock_init(page);
1695}
1696
1697static inline void pgtable_pmd_page_dtor(struct page *page)
1698{
1699#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1700 VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
1701#endif
1702 ptlock_free(page);
1703}
1704
1705#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)
1706
1707#else
1708
1709static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1710{
1711 return &mm->page_table_lock;
1712}
1713
1714static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
1715static inline void pgtable_pmd_page_dtor(struct page *page) {}
1716
1717#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
1718
1719#endif
1720
1721static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
1722{
1723 spinlock_t *ptl = pmd_lockptr(mm, pmd);
1724 spin_lock(ptl);
1725 return ptl;
1726}
1727
1728extern void free_area_init(unsigned long * zones_size);
1729extern void free_area_init_node(int nid, unsigned long * zones_size,
1730 unsigned long zone_start_pfn, unsigned long *zholes_size);
1731extern void free_initmem(void);
1732
1739extern unsigned long free_reserved_area(void *start, void *end,
1740 int poison, char *s);
1741
1742#ifdef CONFIG_HIGHMEM
/*
 * Free a highmem page into the buddy system, adjusting totalhigh_pages
 * and totalram_pages.
 */
1747extern void free_highmem_page(struct page *page);
1748#endif
1749
1750extern void adjust_managed_page_count(struct page *page, long count);
1751extern void mem_init_print_info(const char *str);
1752
1753extern void reserve_bootmem_region(unsigned long start, unsigned long end);
1754
1755
1756static inline void __free_reserved_page(struct page *page)
1757{
1758 ClearPageReserved(page);
1759 init_page_count(page);
1760 __free_page(page);
1761}
1762
1763static inline void free_reserved_page(struct page *page)
1764{
1765 __free_reserved_page(page);
1766 adjust_managed_page_count(page, 1);
1767}
1768
1769static inline void mark_page_reserved(struct page *page)
1770{
1771 SetPageReserved(page);
1772 adjust_managed_page_count(page, -1);
1773}
1774
1781static inline unsigned long free_initmem_default(int poison)
1782{
1783 extern char __init_begin[], __init_end[];
1784
1785 return free_reserved_area(&__init_begin, &__init_end,
1786 poison, "unused kernel");
1787}
1788
1789static inline unsigned long get_num_physpages(void)
1790{
1791 int nid;
1792 unsigned long phys_pages = 0;
1793
1794 for_each_online_node(nid)
1795 phys_pages += node_present_pages(nid);
1796
1797 return phys_pages;
1798}
1799
1800#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
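/*
 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP the architecture registers its
 * physical memory ranges (with node information) in memblock and then
 * calls free_area_init_nodes(max_zone_pfn), passing the maximum PFN
 * each zone may span; node and zone extents are derived from the
 * registered ranges.  The helpers below query those ranges.
 */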
1827extern void free_area_init_nodes(unsigned long *max_zone_pfn);
1828unsigned long node_map_pfn_alignment(void);
1829unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
1830 unsigned long end_pfn);
1831extern unsigned long absent_pages_in_range(unsigned long start_pfn,
1832 unsigned long end_pfn);
1833extern void get_pfn_range_for_nid(unsigned int nid,
1834 unsigned long *start_pfn, unsigned long *end_pfn);
1835extern unsigned long find_min_pfn_with_active_regions(void);
1836extern void free_bootmem_with_active_regions(int nid,
1837 unsigned long max_low_pfn);
1838extern void sparse_memory_present_with_active_regions(int nid);
1839
1840#endif
1841
1842#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
1843 !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
1844static inline int __early_pfn_to_nid(unsigned long pfn,
1845 struct mminit_pfnnid_cache *state)
1846{
1847 return 0;
1848}
1849#else
1850
1851extern int __meminit early_pfn_to_nid(unsigned long pfn);
1852
1853extern int __meminit __early_pfn_to_nid(unsigned long pfn,
1854 struct mminit_pfnnid_cache *state);
1855#endif
1856
1857extern void set_dma_reserve(unsigned long new_dma_reserve);
1858extern void memmap_init_zone(unsigned long, int, unsigned long,
1859 unsigned long, enum memmap_context);
1860extern void setup_per_zone_wmarks(void);
1861extern int __meminit init_per_zone_wmark_min(void);
1862extern void mem_init(void);
1863extern void __init mmap_init(void);
1864extern void show_mem(unsigned int flags);
1865extern void si_meminfo(struct sysinfo * val);
1866extern void si_meminfo_node(struct sysinfo *val, int nid);
1867
1868extern __printf(3, 4)
1869void warn_alloc_failed(gfp_t gfp_mask, unsigned int order,
1870 const char *fmt, ...);
1871
1872extern void setup_per_cpu_pageset(void);
1873
1874extern void zone_pcp_update(struct zone *zone);
1875extern void zone_pcp_reset(struct zone *zone);
1876
1877
1878extern int min_free_kbytes;
1879
1880
1881extern atomic_long_t mmap_pages_allocated;
1882extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
1883
1884
1885void vma_interval_tree_insert(struct vm_area_struct *node,
1886 struct rb_root *root);
1887void vma_interval_tree_insert_after(struct vm_area_struct *node,
1888 struct vm_area_struct *prev,
1889 struct rb_root *root);
1890void vma_interval_tree_remove(struct vm_area_struct *node,
1891 struct rb_root *root);
1892struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root,
1893 unsigned long start, unsigned long last);
1894struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
1895 unsigned long start, unsigned long last);
1896
1897#define vma_interval_tree_foreach(vma, root, start, last) \
1898 for (vma = vma_interval_tree_iter_first(root, start, last); \
1899 vma; vma = vma_interval_tree_iter_next(vma, start, last))
1900
1901void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
1902 struct rb_root *root);
1903void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
1904 struct rb_root *root);
1905struct anon_vma_chain *anon_vma_interval_tree_iter_first(
1906 struct rb_root *root, unsigned long start, unsigned long last);
1907struct anon_vma_chain *anon_vma_interval_tree_iter_next(
1908 struct anon_vma_chain *node, unsigned long start, unsigned long last);
1909#ifdef CONFIG_DEBUG_VM_RB
1910void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
1911#endif
1912
1913#define anon_vma_interval_tree_foreach(avc, root, start, last) \
1914 for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
1915 avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
1916
1917
1918extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
1919extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
1920 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
1921extern struct vm_area_struct *vma_merge(struct mm_struct *,
1922 struct vm_area_struct *prev, unsigned long addr, unsigned long end,
1923 unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
1924 struct mempolicy *, struct vm_userfaultfd_ctx);
1925extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
1926extern int split_vma(struct mm_struct *,
1927 struct vm_area_struct *, unsigned long addr, int new_below);
1928extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
1929extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
1930 struct rb_node **, struct rb_node *);
1931extern void unlink_file_vma(struct vm_area_struct *);
1932extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
1933 unsigned long addr, unsigned long len, pgoff_t pgoff,
1934 bool *need_rmap_locks);
1935extern void exit_mmap(struct mm_struct *);
1936
1937static inline int check_data_rlimit(unsigned long rlim,
1938 unsigned long new,
1939 unsigned long start,
1940 unsigned long end_data,
1941 unsigned long start_data)
1942{
1943 if (rlim < RLIM_INFINITY) {
1944 if (((new - start) + (end_data - start_data)) > rlim)
1945 return -ENOSPC;
1946 }
1947
1948 return 0;
1949}
1950
1951extern int mm_take_all_locks(struct mm_struct *mm);
1952extern void mm_drop_all_locks(struct mm_struct *mm);
1953
1954extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
1955extern struct file *get_mm_exe_file(struct mm_struct *mm);
1956
1957extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
1958extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
1959
1960extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
1961 unsigned long addr, unsigned long len,
1962 unsigned long flags,
1963 const struct vm_special_mapping *spec);
1964
1965extern int install_special_mapping(struct mm_struct *mm,
1966 unsigned long addr, unsigned long len,
1967 unsigned long flags, struct page **pages);
1968
1969extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
1970
1971extern unsigned long mmap_region(struct file *file, unsigned long addr,
1972 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
1973extern unsigned long do_mmap(struct file *file, unsigned long addr,
1974 unsigned long len, unsigned long prot, unsigned long flags,
1975 vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate);
1976extern int do_munmap(struct mm_struct *, unsigned long, size_t);
1977
1978static inline unsigned long
1979do_mmap_pgoff(struct file *file, unsigned long addr,
1980 unsigned long len, unsigned long prot, unsigned long flags,
1981 unsigned long pgoff, unsigned long *populate)
1982{
1983 return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate);
1984}
1985
1986#ifdef CONFIG_MMU
1987extern int __mm_populate(unsigned long addr, unsigned long len,
1988 int ignore_errors);
1989static inline void mm_populate(unsigned long addr, unsigned long len)
1990{
1991
1992 (void) __mm_populate(addr, len, 1);
1993}
1994#else
1995static inline void mm_populate(unsigned long addr, unsigned long len) {}
1996#endif
1997
1998
1999extern unsigned long vm_brk(unsigned long, unsigned long);
2000extern int vm_munmap(unsigned long, size_t);
2001extern unsigned long vm_mmap(struct file *, unsigned long,
2002 unsigned long, unsigned long,
2003 unsigned long, unsigned long);
2004
2005struct vm_unmapped_area_info {
2006#define VM_UNMAPPED_AREA_TOPDOWN 1
2007 unsigned long flags;
2008 unsigned long length;
2009 unsigned long low_limit;
2010 unsigned long high_limit;
2011 unsigned long align_mask;
2012 unsigned long align_offset;
2013};
2014
2015extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
2016extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
2017
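/*
 * Search for an unmapped address range.
 *
 * We are looking for a range that:
 * - does not intersect with any VMA;
 * - is contained within the [low_limit, high_limit) interval;
 * - is at least the desired size.
 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
 */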
2027static inline unsigned long
2028vm_unmapped_area(struct vm_unmapped_area_info *info)
2029{
2030 if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
2031 return unmapped_area_topdown(info);
2032 else
2033 return unmapped_area(info);
2034}
2035
2036
2037extern void truncate_inode_pages(struct address_space *, loff_t);
2038extern void truncate_inode_pages_range(struct address_space *,
2039 loff_t lstart, loff_t lend);
2040extern void truncate_inode_pages_final(struct address_space *);
2041
2042
2043extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
2044extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf);
2045extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
2046
2047
2048int write_one_page(struct page *page, int wait);
2049void task_dirty_inc(struct task_struct *tsk);
2050
2051
2052#define VM_MAX_READAHEAD 128
2053#define VM_MIN_READAHEAD 16
2054
2055int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
2056 pgoff_t offset, unsigned long nr_to_read);
2057
2058void page_cache_sync_readahead(struct address_space *mapping,
2059 struct file_ra_state *ra,
2060 struct file *filp,
2061 pgoff_t offset,
2062 unsigned long size);
2063
2064void page_cache_async_readahead(struct address_space *mapping,
2065 struct file_ra_state *ra,
2066 struct file *filp,
2067 struct page *pg,
2068 pgoff_t offset,
2069 unsigned long size);
2070
2071
2072extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
2073
2074
2075extern int expand_downwards(struct vm_area_struct *vma,
2076 unsigned long address);
2077#if VM_GROWSUP
2078extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
2079#else
2080 #define expand_upwards(vma, address) (0)
2081#endif
2082
2083
2084extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
2085extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
2086 struct vm_area_struct **pprev);
2087
2088
2089
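/*
 * Look up the first VMA which intersects the interval
 * [start_addr, end_addr); NULL if none.  Assume start_addr < end_addr.
 */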
2090static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
2091{
2092 struct vm_area_struct * vma = find_vma(mm,start_addr);
2093
2094 if (vma && end_addr <= vma->vm_start)
2095 vma = NULL;
2096 return vma;
2097}
2098
2099static inline unsigned long vma_pages(struct vm_area_struct *vma)
2100{
2101 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
2102}
2103
2104
2105static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
2106 unsigned long vm_start, unsigned long vm_end)
2107{
2108 struct vm_area_struct *vma = find_vma(mm, vm_start);
2109
2110 if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
2111 vma = NULL;
2112
2113 return vma;
2114}
2115
2116#ifdef CONFIG_MMU
2117pgprot_t vm_get_page_prot(unsigned long vm_flags);
2118void vma_set_page_prot(struct vm_area_struct *vma);
2119#else
2120static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
2121{
2122 return __pgprot(0);
2123}
2124static inline void vma_set_page_prot(struct vm_area_struct *vma)
2125{
2126 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2127}
2128#endif
2129
2130#ifdef CONFIG_NUMA_BALANCING
2131unsigned long change_prot_numa(struct vm_area_struct *vma,
2132 unsigned long start, unsigned long end);
2133#endif
2134
2135struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
2136int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
2137 unsigned long pfn, unsigned long size, pgprot_t);
2138int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
2139int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2140 unsigned long pfn);
2141int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2142 pfn_t pfn);
2143int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
2144
2145
2146struct page *follow_page_mask(struct vm_area_struct *vma,
2147 unsigned long address, unsigned int foll_flags,
2148 unsigned int *page_mask);
2149
2150static inline struct page *follow_page(struct vm_area_struct *vma,
2151 unsigned long address, unsigned int foll_flags)
2152{
2153 unsigned int unused_page_mask;
2154 return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
2155}
2156
2157#define FOLL_WRITE 0x01
2158#define FOLL_TOUCH 0x02
2159#define FOLL_GET 0x04
2160#define FOLL_DUMP 0x08
2161#define FOLL_FORCE 0x10
2162#define FOLL_NOWAIT 0x20
2163
2164#define FOLL_POPULATE 0x40
2165#define FOLL_SPLIT 0x80
2166#define FOLL_HWPOISON 0x100
2167#define FOLL_NUMA 0x200
2168#define FOLL_MIGRATION 0x400
2169#define FOLL_TRIED 0x800
2170#define FOLL_MLOCK 0x1000
2171
2172typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
2173 void *data);
2174extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
2175 unsigned long size, pte_fn_t fn, void *data);
2176
2177
2178#ifdef CONFIG_DEBUG_PAGEALLOC
2179extern bool _debug_pagealloc_enabled;
2180extern void __kernel_map_pages(struct page *page, int numpages, int enable);
2181
2182static inline bool debug_pagealloc_enabled(void)
2183{
2184 return _debug_pagealloc_enabled;
2185}
2186
2187static inline void
2188kernel_map_pages(struct page *page, int numpages, int enable)
2189{
2190 if (!debug_pagealloc_enabled())
2191 return;
2192
2193 __kernel_map_pages(page, numpages, enable);
2194}
2195#ifdef CONFIG_HIBERNATION
2196extern bool kernel_page_present(struct page *page);
2197#endif
2198#else
2199static inline void
2200kernel_map_pages(struct page *page, int numpages, int enable) {}
2201#ifdef CONFIG_HIBERNATION
2202static inline bool kernel_page_present(struct page *page) { return true; }
2203#endif
2204#endif
2205
2206#ifdef __HAVE_ARCH_GATE_AREA
2207extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
2208extern int in_gate_area_no_mm(unsigned long addr);
2209extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
2210#else
2211static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
2212{
2213 return NULL;
2214}
2215static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
2216static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
2217{
2218 return 0;
2219}
2220#endif
2221
2222#ifdef CONFIG_SYSCTL
2223extern int sysctl_drop_caches;
2224int drop_caches_sysctl_handler(struct ctl_table *, int,
2225 void __user *, size_t *, loff_t *);
2226#endif
2227
2228void drop_slab(void);
2229void drop_slab_node(int nid);
2230
2231#ifndef CONFIG_MMU
2232#define randomize_va_space 0
2233#else
2234extern int randomize_va_space;
2235#endif
2236
2237const char * arch_vma_name(struct vm_area_struct *vma);
2238void print_vma_addr(char *prefix, unsigned long rip);
2239
2240void sparse_mem_maps_populate_node(struct page **map_map,
2241 unsigned long pnum_begin,
2242 unsigned long pnum_end,
2243 unsigned long map_count,
2244 int nodeid);
2245
2246struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
2247pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
2248pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
2249pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
2250pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
2251void *vmemmap_alloc_block(unsigned long size, int node);
2252struct vmem_altmap;
2253void *__vmemmap_alloc_block_buf(unsigned long size, int node,
2254 struct vmem_altmap *altmap);
2255static inline void *vmemmap_alloc_block_buf(unsigned long size, int node)
2256{
2257 return __vmemmap_alloc_block_buf(size, node, NULL);
2258}
2259
2260void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
2261int vmemmap_populate_basepages(unsigned long start, unsigned long end,
2262 int node);
2263int vmemmap_populate(unsigned long start, unsigned long end, int node);
2264void vmemmap_populate_print_last(void);
2265#ifdef CONFIG_MEMORY_HOTPLUG
2266void vmemmap_free(unsigned long start, unsigned long end);
2267#endif
2268void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
2269 unsigned long size);
2270
2271enum mf_flags {
2272 MF_COUNT_INCREASED = 1 << 0,
2273 MF_ACTION_REQUIRED = 1 << 1,
2274 MF_MUST_KILL = 1 << 2,
2275 MF_SOFT_OFFLINE = 1 << 3,
2276};
2277extern int memory_failure(unsigned long pfn, int trapno, int flags);
2278extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
2279extern int unpoison_memory(unsigned long pfn);
2280extern int get_hwpoison_page(struct page *page);
2281#define put_hwpoison_page(page) put_page(page)
2282extern int sysctl_memory_failure_early_kill;
2283extern int sysctl_memory_failure_recovery;
2284extern void shake_page(struct page *p, int access);
2285extern atomic_long_t num_poisoned_pages;
2286extern int soft_offline_page(struct page *page, int flags);
2287
2288
2289
2290
2291
2292enum mf_result {
2293 MF_IGNORED,
2294 MF_FAILED,
2295 MF_DELAYED,
2296 MF_RECOVERED,
2297};
2298
2299enum mf_action_page_type {
2300 MF_MSG_KERNEL,
2301 MF_MSG_KERNEL_HIGH_ORDER,
2302 MF_MSG_SLAB,
2303 MF_MSG_DIFFERENT_COMPOUND,
2304 MF_MSG_POISONED_HUGE,
2305 MF_MSG_HUGE,
2306 MF_MSG_FREE_HUGE,
2307 MF_MSG_UNMAP_FAILED,
2308 MF_MSG_DIRTY_SWAPCACHE,
2309 MF_MSG_CLEAN_SWAPCACHE,
2310 MF_MSG_DIRTY_MLOCKED_LRU,
2311 MF_MSG_CLEAN_MLOCKED_LRU,
2312 MF_MSG_DIRTY_UNEVICTABLE_LRU,
2313 MF_MSG_CLEAN_UNEVICTABLE_LRU,
2314 MF_MSG_DIRTY_LRU,
2315 MF_MSG_CLEAN_LRU,
2316 MF_MSG_TRUNCATED_LRU,
2317 MF_MSG_BUDDY,
2318 MF_MSG_BUDDY_2ND,
2319 MF_MSG_UNKNOWN,
2320};
2321
2322#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
2323extern void clear_huge_page(struct page *page,
2324 unsigned long addr,
2325 unsigned int pages_per_huge_page);
2326extern void copy_user_huge_page(struct page *dst, struct page *src,
2327 unsigned long addr, struct vm_area_struct *vma,
2328 unsigned int pages_per_huge_page);
2329#endif
2330
2331extern struct page_ext_operations debug_guardpage_ops;
2332extern struct page_ext_operations page_poisoning_ops;
2333
2334#ifdef CONFIG_DEBUG_PAGEALLOC
2335extern unsigned int _debug_guardpage_minorder;
2336extern bool _debug_guardpage_enabled;
2337
2338static inline unsigned int debug_guardpage_minorder(void)
2339{
2340 return _debug_guardpage_minorder;
2341}
2342
2343static inline bool debug_guardpage_enabled(void)
2344{
2345 return _debug_guardpage_enabled;
2346}
2347
2348static inline bool page_is_guard(struct page *page)
2349{
2350 struct page_ext *page_ext;
2351
2352 if (!debug_guardpage_enabled())
2353 return false;
2354
2355 page_ext = lookup_page_ext(page);
2356 return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
2357}
2358#else
2359static inline unsigned int debug_guardpage_minorder(void) { return 0; }
2360static inline bool debug_guardpage_enabled(void) { return false; }
2361static inline bool page_is_guard(struct page *page) { return false; }
2362#endif
2363
2364#if MAX_NUMNODES > 1
2365void __init setup_nr_node_ids(void);
2366#else
2367static inline void setup_nr_node_ids(void) {}
2368#endif
2369
2370#endif
2371#endif
2372