#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
#include <linux/page_ref.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;
struct bdi_writeback;

#ifndef CONFIG_NEED_MULTIPLE_NODES	/* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;

static inline void set_max_mapnr(unsigned long limit)
{
	max_mapnr = limit;
}
#else
static inline void set_max_mapnr(unsigned long limit) { }
#endif

extern unsigned long totalram_pages;
extern void * high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
extern const int mmap_rnd_bits_min;
extern const int mmap_rnd_bits_max;
extern int mmap_rnd_bits __read_mostly;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
extern const int mmap_rnd_compat_bits_min;
extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __read_mostly;
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#ifndef __pa_symbol
#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

/*
 * An architecture can define this in <asm/pgtable.h> to prevent the MM
 * from establishing shared zero-page mappings on read faults for a given
 * mm (s390 does this when hardware bits of the physical page are
 * multiplexed under virtualization).
 */
#ifndef mm_forbids_zeropage
#define mm_forbids_zeropage(X)	(0)
#endif

/*
 * Default maximum number of active map areas; this limits the number of
 * vmas per mm_struct. An ELF core dump creates one section per vma, and
 * the section count is stored in an unsigned short, so it must stay
 * below 65535. The kernel also appends a few informative sections to the
 * dump, hence the safety margin of 5.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

extern int sysctl_max_map_count;

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;

extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
				    size_t *, loff_t *);
extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
				    size_t *, loff_t *);

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)addr, PAGE_SIZE)

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

extern struct kmem_cache *vm_area_cachep;

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif
/*
 * vm_flags in vm_area_struct, see mm_types.h.
 */
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
#define VM_UFFD_WP	0x00001000	/* wrprotect pages tracking */

#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_LOCKONFAULT	0x00080000	/* Lock the pages covered when they are faulted in */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_ARCH_2	0x02000000
#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
#else
# define VM_SOFTDIRTY	0
#endif

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
#define VM_HIGH_ARCH_BIT_0	32	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_1	33	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_2	34	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3	35	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#if defined (CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS)
# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0	VM_HIGH_ARCH_0	/* A protection key is a 4-bit value */
# define VM_PKEY_BIT1	VM_HIGH_ARCH_1
# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
#endif
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_METAG)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif

#if defined(CONFIG_X86)
/* MPX specific bounds table or bounds directory */
# define VM_MPX		VM_ARCH_2
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK	VM_GROWSUP
#else
#define VM_STACK	VM_GROWSDOWN
#endif

#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)

/*
 * Special vmas that are non-mergable, non-mlock()able.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

/* This mask defines which mm->def_flags a process can inherit its parent */
#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE

/* This mask is used to clear all the VMA flags used by mlock */
#define VM_LOCKED_CLEAR_MASK	(~(VM_LOCKED | VM_LOCKONFAULT))

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];

#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
#define FAULT_FLAG_MKWRITE	0x02	/* Fault was mkwrite of existing pte */
#define FAULT_FLAG_ALLOW_RETRY	0x04	/* Retry fault if blocking */
#define FAULT_FLAG_RETRY_NOWAIT	0x08	/* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE	0x10	/* The fault task is in SIGKILL killable region */
#define FAULT_FLAG_TRIED	0x20	/* Second try */
#define FAULT_FLAG_USER		0x40	/* The fault originated in userspace */
#define FAULT_FLAG_REMOTE	0x80	/* faulting for non current tsk/mm */
#define FAULT_FLAG_INSTRUCTION	0x100	/* The fault was during an instruction fetch */

/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a
 * bitmask of VM_FAULT_xxx flags that give details about how the fault
 * was handled.
 *
 * MM layer fills up gfp_mask for page allocations but fault handler might
 * alter it if its implementation requires a different allocation context.
 *
 * pgoff should be used in favour of virtual_address, if possible.
 */
struct vm_fault {
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	gfp_t gfp_mask;			/* gfp mask to be used for allocations */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	void __user *virtual_address;	/* Faulting virtual address */

	struct page *cow_page;		/* Handler may choose to COW */
	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
	/* for ->map_pages() only */
	pgoff_t max_pgoff;		/* map pages for offset from pgoff till
					 * max_pgoff inclusive */
	pte_t *pte;			/* not NULL if ->map_pages() sets it */
};
/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*mremap)(struct vm_area_struct * area);
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
	int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
						pmd_t *, unsigned int flags);
	void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	int (*pfn_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs that can switch between memory and hardware
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
#endif
	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr.  This is useful if the default behavior
	 * (using pte_page()) would not find the correct page.
	 */
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};
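
/*
 * A minimal usage sketch (hypothetical, not defined by this header): a
 * driver implementing only ->fault to hand out one of its own pages.
 * Names such as my_fault/my_ops/my_private_page are illustrative.
 *
 *	static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		struct page *page = my_private_page(vma->vm_private_data,
 *						    vmf->pgoff);
 *		if (!page)
 *			return VM_FAULT_SIGBUS;
 *		get_page(page);
 *		vmf->page = page;	// refcounted page handed to the MM
 *		return 0;
 *	}
 *
 *	static const struct vm_operations_struct my_ops = {
 *		.fault = my_fault,
 *	};
 *
 * The driver's ->mmap method would then set vma->vm_ops = &my_ops.
 */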

struct mmu_gather;
struct inode;

#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline int pmd_devmap(pmd_t pmd)
{
	return 0;
}
#endif

#include <linux/page-flags.h>
#include <linux/huge_mm.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
	return page_ref_dec_and_test(page);
}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 * This can be called when MMU is off so it must not access
 * any of the virtual mappings.
 */
static inline int get_page_unless_zero(struct page *page)
{
	return page_ref_add_unless(page, 1, 0);
}
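
/*
 * A minimal usage sketch (illustrative, assuming a scanner that located
 * @page without yet holding a reference): take a speculative reference,
 * then drop it when done.
 *
 *	if (get_page_unless_zero(page)) {
 *		// the page cannot be freed while we hold the reference
 *		...inspect or operate on the page...
 *		put_page(page);
 *	}
 */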

extern int page_is_ram(unsigned long pfn);

enum {
	REGION_INTERSECTS,
	REGION_DISJOINT,
	REGION_MIXED,
};

int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
		      unsigned long desc);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
static inline int is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return 0;
#endif
}
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

extern void kvfree(const void *addr);

static inline atomic_t *compound_mapcount_ptr(struct page *page)
{
	return &page[1].compound_mapcount;
}

static inline int compound_mapcount(struct page *page)
{
	if (!PageCompound(page))
		return 0;
	page = compound_head(page);
	return atomic_read(compound_mapcount_ptr(page)) + 1;
}

/*
 * The atomic page->_mapcount, starts from -1: so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */
static inline void page_mapcount_reset(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

int __page_mapcount(struct page *page);

static inline int page_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(PageSlab(page), page);

	if (unlikely(PageCompound(page)))
		return __page_mapcount(page);
	return atomic_read(&page->_mapcount) + 1;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int total_mapcount(struct page *page);
int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
#else
static inline int total_mapcount(struct page *page)
{
	return page_mapcount(page);
}
static inline int page_trans_huge_mapcount(struct page *page,
					   int *total_mapcount)
{
	int mapcount = page_mapcount(page);
	if (total_mapcount)
		*total_mapcount = mapcount;
	return mapcount;
}
#endif

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);

	return compound_head(page);
}

void __put_page(struct page *page);

void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);
int split_free_page(struct page *page);

/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a compound page.
 */
typedef void compound_page_dtor(struct page *);

/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
enum compound_dtor_id {
	NULL_COMPOUND_DTOR,
	COMPOUND_PAGE_DTOR,
#ifdef CONFIG_HUGETLB_PAGE
	HUGETLB_PAGE_DTOR,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	TRANSHUGE_PAGE_DTOR,
#endif
	NR_COMPOUND_DTORS,
};
extern compound_page_dtor * const compound_page_dtors[];

static inline void set_compound_page_dtor(struct page *page,
		enum compound_dtor_id compound_dtor)
{
	VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
	page[1].compound_dtor = compound_dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
	return compound_page_dtors[page[1].compound_dtor];
}

static inline unsigned int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return page[1].compound_order;
}

static inline void set_compound_order(struct page *page, unsigned int order)
{
	page[1].compound_order = order;
}
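
/*
 * Note: a compound page of order N covers 1 << N base pages, so e.g.
 * compound_order() == 9 with 4 KiB base pages (x86-64) describes a
 * 2 MiB huge page. Illustrative arithmetic, not a helper defined here:
 *
 *	unsigned long nr_pages = 1UL << compound_order(page);
 *	size_t bytes = nr_pages << PAGE_SHIFT;
 */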

void free_compound_page(struct page *page);

#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case, do always want
 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}

void do_set_pte(struct vm_area_struct *vma, unsigned long address,
		struct page *page, pte_t *pte, bool write, bool anon);
#endif

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */

/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

#ifdef CONFIG_ZONE_DEVICE
void get_zone_device_page(struct page *page);
void put_zone_device_page(struct page *page);
static inline bool is_zone_device_page(const struct page *page)
{
	return page_zonenum(page) == ZONE_DEVICE;
}
#else
static inline void get_zone_device_page(struct page *page)
{
}
static inline void put_zone_device_page(struct page *page)
{
}
static inline bool is_zone_device_page(const struct page *page)
{
	return false;
}
#endif

static inline void get_page(struct page *page)
{
	page = compound_head(page);
	/*
	 * Getting a normal page or the head of a compound page
	 * requires an already elevated reference count.
	 */
	VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
	page_ref_inc(page);

	if (unlikely(is_zone_device_page(page)))
		get_zone_device_page(page);
}

static inline void put_page(struct page *page)
{
	page = compound_head(page);

	if (put_page_testzero(page))
		__put_page(page);

	if (unlikely(is_zone_device_page(page)))
		put_zone_device_page(page);
}
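
/*
 * A minimal pairing sketch (illustrative): every get_page() must be
 * balanced by a put_page() once the caller is done with the page.
 *
 *	get_page(page);		// pin: refcount keeps the page alive
 *	...use the page contents safely...
 *	put_page(page);		// unpin: last ref frees the page
 *
 * Both helpers resolve the compound head first, so passing a tail page
 * of a compound page pins the whole compound page.
 */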

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif

/*
 * The identification function is mainly used by the buddy allocator for
 * determining if two pages could be buddies. We are not really identifying
 * the zone since we could be using the section number id if we do not have
 * node id available in page flags.
 * We only guarantee that it will return the same value for two combinable
 * pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
	return zone->node;
#else
	return 0;
#endif
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

static inline int cpupid_to_pid(int cpupid)
{
	return cpupid & LAST__PID_MASK;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}

static inline int cpupid_to_nid(int cpupid)
{
	return cpu_to_node(cpupid_to_cpu(cpupid));
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}

static inline bool cpupid_cpu_unset(int cpupid)
{
	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
}

static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
{
	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
}

#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}

static inline int page_cpupid_last(struct page *page)
{
	return page->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int page_cpupid_last(struct page *page)
{
	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}

extern int page_cpupid_xchg_last(struct page *page, int cpupid);

static inline void page_cpupid_reset_last(struct page *page)
{
	int cpupid = (1 << LAST_CPUPID_SHIFT) - 1;

	page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
	page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
#else /* !CONFIG_NUMA_BALANCING */
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return page_to_nid(page);
}

static inline int page_cpupid_last(struct page *page)
{
	return page_to_nid(page);
}

static inline int cpupid_to_nid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_pid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return -1;
}

static inline int cpu_pid_to_cpupid(int nid, int pid)
{
	return -1;
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return true;
}

static inline void page_cpupid_reset_last(struct page *page)
{
}

static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#ifdef SECTION_IN_PAGE_FLAGS
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}

#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return page->mem_cgroup;
}
#else
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}
#endif

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(const struct page *page)
{
	return __va(PFN_PHYS(page_to_pfn(page)));
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
static inline void *page_address(const struct page *page)
{
	return page->virtual;
}
static inline void set_page_address(struct page *page, void *address)
{
	page->virtual = address;
}
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

extern void *page_rmapping(struct page *page);
extern struct anon_vma *page_anon_vma(struct page *page);
extern struct address_space *page_mapping(struct page *page);

extern struct address_space *__page_file_mapping(struct page *);

static inline
struct address_space *page_file_mapping(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_mapping(page);

	return page->mapping;
}

/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use ->private
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return page_private(page);
	return page->index;
}

extern pgoff_t __page_file_index(struct page *page);

/*
 * Return the file index of the page. Regular pagecache pages use ->index
 * whereas swapcache pages use swp_offset(->private)
 */
static inline pgoff_t page_file_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_index(page);

	return page->index;
}

/*
 * Return true if this page is mapped into pagetables.
 * For compound page it returns true if any subpage of compound page is mapped.
 */
static inline bool page_mapped(struct page *page)
{
	int i;
	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < hpage_nr_pages(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}

/*
 * Return true only if the page has been allocated with
 * ALLOC_NO_WATERMARKS and the low watermark was not
 * met implying that the system is under some pressure.
 */
static inline bool page_is_pfmemalloc(struct page *page)
{
	/*
	 * Page index cannot be this large so this must be
	 * a pfmemalloc page.
	 */
	return page->index == -1UL;
}

/*
 * Only to be called by the page allocator on a freshly allocated
 * page.
 */
static inline void set_page_pfmemalloc(struct page *page)
{
	page->index = -1UL;
}

static inline void clear_page_pfmemalloc(struct page *page)
{
	page->index = 0;
}

/*
 * Different kinds of faults, as returned by handle_mm_fault().
 * Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 */

#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
#define VM_FAULT_HWPOISON_LARGE 0x0020	/* Hit poisoned large page. Index encoded in upper bits */
#define VM_FAULT_SIGSEGV 0x0040

#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
#define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */
#define VM_FAULT_FALLBACK 0x0800	/* huge page fault failed, fall back to small */

#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */

#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
			 VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
			 VM_FAULT_FALLBACK)

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)

/*
 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
 */
extern void pagefault_out_of_memory(void);

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

/*
 * Flags passed to show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallow fallback to other nodes */

extern void show_free_areas(unsigned int flags);
extern bool skip_free_areas_node(unsigned int flags, int nid);

int shmem_zero_setup(struct vm_area_struct *);
#ifdef CONFIG_SHMEM
bool shmem_mapping(struct address_space *mapping);
#else
static inline bool shmem_mapping(struct address_space *mapping)
{
	return false;
}
#endif

extern bool can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct address_space *check_mapping;	/* Check page->mapping if set */
	pgoff_t	first_index;			/* Lowest page->index to unmap */
	pgoff_t last_index;			/* Highest page->index to unmap */
	bool ignore_dirty;			/* Ignore dirty pages */
	bool check_swap_entries;		/* Check also swap entries */
};

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
		pte_t pte);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd);

int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long start, unsigned long end);

/**
 * mm_walk - callbacks for walk_page_range
 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry;
 *	       this handler is required to be able to handle
 *	       pmd_trans_huge() pmds
 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
 * @pte_hole: if set, called for each hole at all levels
 * @hugetlb_entry: if set, called for each hugetlb entry
 * @test_walk: caller specific callback function to determine whether
 *             we walk over the current vma or not. Returning 0 means
 *             "do page table walk over the current vma", a negative
 *             value means "abort current page table walk right now"
 *             and 1 means "skip the current vma"
 * @mm:        mm_struct representing the target process of page table walk
 * @vma:       vma currently walked (NULL if walking outside vmas)
 * @private:   private data for callbacks' usage
 *
 * (see the comment on walk_page_range() for more details)
 */
struct mm_walk {
	int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_entry)(pte_t *pte, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_hole)(unsigned long addr, unsigned long next,
			struct mm_walk *walk);
	int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
			     unsigned long addr, unsigned long next,
			     struct mm_walk *walk);
	int (*test_walk)(unsigned long addr, unsigned long next,
			struct mm_walk *walk);
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	void *private;
};

int walk_page_range(unsigned long addr, unsigned long end,
		struct mm_walk *walk);
int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
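
/*
 * A minimal walker sketch (illustrative; count_pte and nr_present are
 * hypothetical names): count present PTEs in [start, end) of @mm. The
 * caller must hold mm->mmap_sem, at least for read.
 *
 *	static int count_pte(pte_t *pte, unsigned long addr,
 *			     unsigned long next, struct mm_walk *walk)
 *	{
 *		unsigned long *nr_present = walk->private;
 *
 *		if (pte_present(*pte))
 *			(*nr_present)++;
 *		return 0;		// non-zero would abort the walk
 *	}
 *
 *	unsigned long nr_present = 0;
 *	struct mm_walk walk = {
 *		.pte_entry = count_pte,
 *		.mm = mm,
 *		.private = &nr_present,
 *	};
 *	walk_page_range(start, end, &walk);
 */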
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write);

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);

#ifdef CONFIG_MMU
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long address, unsigned int fault_flags,
			    bool *unlocked);
#else
static inline int handle_mm_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct task_struct *tsk,
		struct mm_struct *mm, unsigned long address,
		unsigned int fault_flags, bool *unlocked)
{
	/* should never happen if there's no MMU */
	BUG();
	return -EFAULT;
}
#endif

extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, int write);

long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		      unsigned long start, unsigned long nr_pages,
		      unsigned int foll_flags, struct page **pages,
		      struct vm_area_struct **vmas, int *nonblocking);
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long start, unsigned long nr_pages,
			    int write, int force, struct page **pages,
			    struct vm_area_struct **vmas);
long get_user_pages(unsigned long start, unsigned long nr_pages,
			    int write, int force, struct page **pages,
			    struct vm_area_struct **vmas);
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages, int *locked);
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
			       unsigned long start, unsigned long nr_pages,
			       int write, int force, struct page **pages,
			       unsigned int gup_flags);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages);
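
/*
 * A minimal pinning sketch (illustrative): get_user_pages_fast() pins
 * user pages of current->mm without the caller taking mmap_sem; each
 * successfully pinned page must later be released with put_page().
 *
 *	struct page *pages[16];
 *	int i, got;
 *
 *	got = get_user_pages_fast(user_addr, 16, 1, pages);	// 1 = write
 *	for (i = 0; i < got; i++) {
 *		...access the page, e.g. via kmap()...
 *		put_page(pages[i]);
 *	}
 */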

/* Container for pinned pfns / pages */
struct frame_vector {
	unsigned int nr_allocated;	/* Number of frames we have space for */
	unsigned int nr_frames;	/* Number of frames stored in ptrs array */
	bool got_ref;		/* Did we pin pages by getting page ref? */
	bool is_pfns;		/* Does array contain pages or pfns? */
	void *ptrs[0];		/* Array of pinned pfns / pages. Use
				 * frame_vector_pages() or frame_vector_pfns()
				 * for access */
};

struct frame_vector *frame_vector_create(unsigned int nr_frames);
void frame_vector_destroy(struct frame_vector *vec);
int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
		     bool write, bool force, struct frame_vector *vec);
void put_vaddr_frames(struct frame_vector *vec);
int frame_vector_to_pages(struct frame_vector *vec);
void frame_vector_to_pfns(struct frame_vector *vec);

static inline unsigned int frame_vector_count(struct frame_vector *vec)
{
	return vec->nr_frames;
}

static inline struct page **frame_vector_pages(struct frame_vector *vec)
{
	if (vec->is_pfns) {
		int err = frame_vector_to_pages(vec);

		if (err)
			return ERR_PTR(err);
	}
	return (struct page **)(vec->ptrs);
}

static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
{
	if (!vec->is_pfns)
		frame_vector_to_pfns(vec);
	return (unsigned long *)(vec->ptrs);
}
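
/*
 * A minimal frame_vector usage sketch (illustrative): pin the frames
 * backing @nr user pages starting at @start, use them as struct pages,
 * then release everything.
 *
 *	struct frame_vector *vec = frame_vector_create(nr);
 *	if (!vec)
 *		return -ENOMEM;
 *	ret = get_vaddr_frames(start, nr, true, false, vec);
 *	if (ret > 0) {
 *		struct page **pages = frame_vector_pages(vec);
 *		if (!IS_ERR(pages)) {
 *			...use pages[0 .. frame_vector_count(vec) - 1]...
 *		}
 *		put_vaddr_frames(vec);
 *	}
 *	frame_vector_destroy(vec);
 */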

struct kvec;
int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
			struct page **pages);
int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);

extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned int offset,
			      unsigned int length);

int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_cleaned(struct page *page, struct address_space *mapping,
			  struct bdi_writeback *wb);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
void cancel_dirty_page(struct page *page);
int clear_page_dirty_for_io(struct page *page);

int get_cmdline(struct task_struct *task, char *buffer, int buflen);

/* Is the vma a continuation of the stack vma above it? */
static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
}

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

static inline int stack_guard_page_start(struct vm_area_struct *vma,
					 unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSDOWN) &&
		(vma->vm_start == addr) &&
		!vma_growsdown(vma->vm_prev, addr);
}

/* Is the vma a continuation of the stack vma below it? */
static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
}

static inline int stack_guard_page_end(struct vm_area_struct *vma,
				       unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSUP) &&
		(vma->vm_end == addr) &&
		!vma_growsup(vma->vm_next, addr);
}

int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks);
extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end, pgprot_t newprot,
			      int dirty_accountable, int prot_numa);
extern int mprotect_fixup(struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

/*
 * doesn't attempt to fault and will return short.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages);
/*
 * per-process(per-mm_struct) statistics.
 */
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	long val = atomic_long_read(&mm->rss_stat.count[member]);

#ifdef SPLIT_RSS_COUNTING
	/*
	 * The counter is updated asynchronously and may transiently go
	 * negative; never report a negative value to users.
	 */
	if (val < 0)
		val = 0;
#endif
	return (unsigned long)val;
}

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	atomic_long_add(value, &mm->rss_stat.count[member]);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_inc(&mm->rss_stat.count[member]);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_dec(&mm->rss_stat.count[member]);
}

/* Optimized variant when page is already known not to be PageAnon */
static inline int mm_counter_file(struct page *page)
{
	if (PageSwapBacked(page))
		return MM_SHMEMPAGES;
	return MM_FILEPAGES;
}

static inline int mm_counter(struct page *page)
{
	if (PageAnon(page))
		return MM_ANONPAGES;
	return mm_counter_file(page);
}

static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES) +
		get_mm_counter(mm, MM_SHMEMPAGES);
}

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if ((mm)->hiwater_rss < _rss)
		(mm)->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
{
	mm->hiwater_rss = get_mm_rss(mm);
}

static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

	if (*maxrss < hiwater_rss)
		*maxrss = hiwater_rss;
}

#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct mm_struct *mm)
{
}
#endif

#ifndef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t pte)
{
	return 0;
}
#endif

int vma_wants_writenotify(struct vm_area_struct *vma);

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			       spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
				    spinlock_t **ptl)
{
	pte_t *ptep;
	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
	return ptep;
}

#ifdef __PAGETABLE_PUD_FOLDED
static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
						unsigned long address)
{
	return 0;
}
#else
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}

static inline void mm_nr_pmds_init(struct mm_struct *mm) {}

static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
{
	return 0;
}

static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}

#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);

static inline void mm_nr_pmds_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->nr_pmds, 0);
}

static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
{
	return atomic_long_read(&mm->nr_pmds);
}

static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
	atomic_long_inc(&mm->nr_pmds);
}

static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
	atomic_long_dec(&mm->nr_pmds);
}
#endif

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);

/*
 * The following ifdef needed to get the 4level-fixup.h header to work.
 * Remove it when 4level-fixup.h has been removed.
 */
#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
		NULL: pud_offset(pgd, address);
}

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
		NULL: pmd_offset(pud, address);
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */

#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
extern bool ptlock_alloc(struct page *page);
extern void ptlock_free(struct page *page);

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return page->ptl;
}
#else /* ALLOC_SPLIT_PTLOCKS */
static inline void ptlock_cache_init(void)
{
}

static inline bool ptlock_alloc(struct page *page)
{
	return true;
}

static inline void ptlock_free(struct page *page)
{
}

static inline spinlock_t *ptlock_ptr(struct page *page)
{
	return &page->ptl;
}
#endif /* ALLOC_SPLIT_PTLOCKS */

static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_page(*pmd));
}

static inline bool ptlock_init(struct page *page)
{
	/*
	 * prep_new_page() initializes page->private (and therefore
	 * page->ptl) with 0. Make sure nobody took it in use in between.
	 *
	 * It can happen if an arch tries to use slab for page table
	 * allocation: slab code uses page->slab_cache, which shares
	 * storage with page->ptl.
	 */
	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
	if (!ptlock_alloc(page))
		return false;
	spin_lock_init(ptlock_ptr(page));
	return true;
}

/* Reset page->mapping so free_pages_check won't complain. */
static inline void pte_lock_deinit(struct page *page)
{
	page->mapping = NULL;
	ptlock_free(page);
}

#else	/* !USE_SPLIT_PTE_PTLOCKS */
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}
static inline void ptlock_cache_init(void) {}
static inline bool ptlock_init(struct page *page) { return true; }
static inline void pte_lock_deinit(struct page *page) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */

static inline void pgtable_init(void)
{
	ptlock_cache_init();
	pgtable_cache_init();
}

static inline bool pgtable_page_ctor(struct page *page)
{
	if (!ptlock_init(page))
		return false;
	inc_zone_page_state(page, NR_PAGETABLE);
	return true;
}

static inline void pgtable_page_dtor(struct page *page)
{
	pte_lock_deinit(page);
	dec_zone_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)
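
/*
 * A minimal locking sketch (illustrative): map and lock the PTE for
 * @addr, inspect it, then unlock and unmap. The pmd must already be
 * known to be present (e.g. via pmd_none()/pmd_trans_huge() checks).
 *
 *	spinlock_t *ptl;
 *	pte_t *pte;
 *
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	if (pte_present(*pte)) {
 *		...the PTE cannot change under us here...
 *	}
 *	pte_unmap_unlock(pte, ptl);
 */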

#define pte_alloc(mm, pmd, address)			\
	(unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address))

#define pte_alloc_map(mm, pmd, address)			\
	(pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
	(pte_alloc(mm, pmd, address) ?			\
		 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)			\
	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
		NULL: pte_offset_kernel(pmd, address))

#if USE_SPLIT_PMD_PTLOCKS

static struct page *pmd_to_page(pmd_t *pmd)
{
	unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	return virt_to_page((void *)((unsigned long) pmd & mask));
}

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return ptlock_ptr(pmd_to_page(pmd));
}

static inline bool pgtable_pmd_page_ctor(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	page->pmd_huge_pte = NULL;
#endif
	return ptlock_init(page);
}

static inline void pgtable_pmd_page_dtor(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
#endif
	ptlock_free(page);
}

#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)

#else

static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
	return &mm->page_table_lock;
}

static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
static inline void pgtable_pmd_page_dtor(struct page *page) {}

#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)

#endif

static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl = pmd_lockptr(mm, pmd);
	spin_lock(ptl);
	return ptl;
}

extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, unsigned long * zones_size,
		unsigned long zone_start_pfn, unsigned long *zholes_size);
extern void free_initmem(void);

/*
 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
 * into the buddy system. The freed pages will be poisoned with pattern
 * "poison" if it's within range [0, UCHAR_MAX].
 * Return pages freed into the buddy system.
 */
extern unsigned long free_reserved_area(void *start, void *end,
					int poison, char *s);

#ifdef CONFIG_HIGHMEM
/*
 * Free a highmem page into the buddy system, adjusting totalhigh_pages
 * and totalram_pages.
 */
extern void free_highmem_page(struct page *page);
#endif

extern void adjust_managed_page_count(struct page *page, long count);
extern void mem_init_print_info(const char *str);

extern void reserve_bootmem_region(unsigned long start, unsigned long end);

/* Free the reserved page into the buddy system, so it gets managed. */
static inline void __free_reserved_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}

static inline void free_reserved_page(struct page *page)
{
	__free_reserved_page(page);
	adjust_managed_page_count(page, 1);
}

static inline void mark_page_reserved(struct page *page)
{
	SetPageReserved(page);
	adjust_managed_page_count(page, -1);
}

/*
 * Default method to free all the __init memory into the buddy system.
 * The freed pages will be poisoned with pattern "poison" if it's within
 * range [0, UCHAR_MAX].
 * Return pages freed into the buddy system.
 */
static inline unsigned long free_initmem_default(int poison)
{
	extern char __init_begin[], __init_end[];

	return free_reserved_area(&__init_begin, &__init_end,
				  poison, "unused kernel");
}

static inline unsigned long get_num_physpages(void)
{
	int nid;
	unsigned long phys_pages = 0;

	for_each_online_node(nid)
		phys_pages += node_present_pages(nid);

	return phys_pages;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise
 * its zones, allocate the backing mem_map and account for memory holes in
 * an architecture-independent manner, instead of building zone_sizes[]
 * and zholes_size[] arrays and passing them to free_area_init_node().
 *
 * An architecture is expected to register the page frame ranges backed
 * by physical memory with memblock_add[_node]() before calling
 * free_area_init_nodes(), passing in the PFN each zone ends at.
 *
 * free_bootmem_with_active_regions() calls free_bootmem_node() for each
 * registered physical page range. Similarly,
 * sparse_memory_present_with_active_regions() calls memory_present() for
 * each range when SPARSEMEM is enabled.
 *
 * See mm/page_alloc.c for more information on each function exposed by
 * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
 */
extern void free_area_init_nodes(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
						unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
						unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
						unsigned long max_low_pfn);
extern void sparse_memory_present_with_active_regions(int nid);

#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
    !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
static inline int __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state)
{
	return 0;
}
#else
/* please see mm/page_alloc.c */
extern int __meminit early_pfn_to_nid(unsigned long pfn);
/* there is a per-arch backend function. */
extern int __meminit __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state);
#endif

extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long,
				unsigned long, enum memmap_context);
extern void setup_per_zone_wmarks(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
extern void show_mem(unsigned int flags);
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);

extern __printf(3, 4)
void warn_alloc_failed(gfp_t gfp_mask, unsigned int order,
		const char *fmt, ...);

extern void setup_per_cpu_pageset(void);

extern void zone_pcp_update(struct zone *zone);
extern void zone_pcp_reset(struct zone *zone);

/* page_alloc.c */
extern int min_free_kbytes;
extern int watermark_scale_factor;

/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);

/* interval_tree.c */
void vma_interval_tree_insert(struct vm_area_struct *node,
			      struct rb_root *root);
void vma_interval_tree_insert_after(struct vm_area_struct *node,
				    struct vm_area_struct *prev,
				    struct rb_root *root);
void vma_interval_tree_remove(struct vm_area_struct *node,
			      struct rb_root *root);
struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root,
				unsigned long start, unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
				unsigned long start, unsigned long last);

#define vma_interval_tree_foreach(vma, root, start, last)		\
	for (vma = vma_interval_tree_iter_first(root, start, last);	\
	     vma; vma = vma_interval_tree_iter_next(vma, start, last))

void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
				   struct rb_root *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
				   struct rb_root *root);
struct anon_vma_chain *anon_vma_interval_tree_iter_first(
	struct rb_root *root, unsigned long start, unsigned long last);
struct anon_vma_chain *anon_vma_interval_tree_iter_next(
	struct anon_vma_chain *node, unsigned long start, unsigned long last);
#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
#endif

#define anon_vma_interval_tree_foreach(avc, root, start, last)		 \
	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))

/* mmap.c */
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
extern struct vm_area_struct *vma_merge(struct mm_struct *,
	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
	struct mempolicy *, struct vm_userfaultfd_ctx);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int split_vma(struct mm_struct *,
	struct vm_area_struct *, unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
	struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);

static inline int check_data_rlimit(unsigned long rlim,
				    unsigned long new,
				    unsigned long start,
				    unsigned long end_data,
				    unsigned long start_data)
{
	if (rlim < RLIM_INFINITY) {
		if (((new - start) + (end_data - start_data)) > rlim)
			return -ENOSPC;
	}

	return 0;
}

extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);

extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);

extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);

extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags,
				   const struct vm_special_mapping *spec);
extern int install_special_mapping(struct mm_struct *mm,
				   unsigned long addr, unsigned long len,
				   unsigned long flags, struct page **pages);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long mmap_region(struct file *file, unsigned long addr,
	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate);
extern int do_munmap(struct mm_struct *, unsigned long, size_t);

static inline unsigned long
do_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot, unsigned long flags,
	unsigned long pgoff, unsigned long *populate)
{
	return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate);
}

#ifdef CONFIG_MMU
extern int __mm_populate(unsigned long addr, unsigned long len,
			 int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
{
	/* Ignore errors */
	(void) __mm_populate(addr, len, 1);
}
#else
static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif

/* These take the mm semaphore themselves */
extern unsigned long vm_brk(unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long vm_mmap(struct file *, unsigned long,
	unsigned long, unsigned long,
	unsigned long, unsigned long);

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;
	unsigned long length;
	unsigned long low_limit;
	unsigned long high_limit;
	unsigned long align_mask;
	unsigned long align_offset;
};

extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);

/*
 * Search for an unmapped address range.
 *
 * We are looking for a range that:
 * - does not intersect with any VMA;
 * - is contained within the [low_limit, high_limit) interval;
 * - is at least the desired size;
 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask).
 */
static inline unsigned long
vm_unmapped_area(struct vm_unmapped_area_info *info)
{
	if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
		return unmapped_area_topdown(info);
	else
		return unmapped_area(info);
}
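
/*
 * A minimal search sketch (illustrative): find a free, page-aligned
 * range of @len bytes anywhere in the task's usable address space,
 * searching bottom-up. Typically called from an arch or driver
 * get_unmapped_area() implementation.
 *
 *	struct vm_unmapped_area_info info = {
 *		.flags = 0,			// bottom-up search
 *		.length = len,
 *		.low_limit = mm->mmap_base,
 *		.high_limit = TASK_SIZE,
 *		.align_mask = 0,		// no extra alignment
 *		.align_offset = 0,
 *	};
 *	unsigned long addr = vm_unmapped_area(&info);
 *	if (offset_in_page(addr))
 *		return addr;			// negative errno encoded here
 */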

/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
				       loff_t lstart, loff_t lend);
extern void truncate_inode_pages_final(struct address_space *);

/* generic vm_area_ops exported for stackable file systems */
extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf);
extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);

/* mm/page-writeback.c */
int write_one_page(struct page *page, int wait);
void task_dirty_inc(struct task_struct *tsk);

/* readahead.c */
#define VM_MAX_READAHEAD	128	/* kbytes */
#define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */

int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read);

void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra,
			       struct file *filp,
			       pgoff_t offset,
			       unsigned long size);

void page_cache_async_readahead(struct address_space *mapping,
				struct file_ra_state *ra,
				struct file *filp,
				struct page *pg,
				pgoff_t offset,
				unsigned long size);

/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);

/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
extern int expand_downwards(struct vm_area_struct *vma,
		unsigned long address);
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
  #define expand_upwards(vma, address) (0)
#endif

/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
					     struct vm_area_struct **pprev);

/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none.  Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma = find_vma(mm, start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}
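
/*
 * A minimal lookup sketch (illustrative): find_vma() returns the first
 * VMA ending above @addr, which may lie entirely above it, so callers
 * must re-check vm_start. mmap_sem must be held.
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, addr);
 *	if (vma && vma->vm_start <= addr) {
 *		...addr really lies inside this vma...
 *	}
 *	up_read(&mm->mmap_sem);
 */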
2132
2133static inline unsigned long vma_pages(struct vm_area_struct *vma)
2134{
2135 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
2136}
2137
2138
2139static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
2140 unsigned long vm_start, unsigned long vm_end)
2141{
2142 struct vm_area_struct *vma = find_vma(mm, vm_start);
2143
2144 if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
2145 vma = NULL;
2146
2147 return vma;
2148}
2149
2150#ifdef CONFIG_MMU
2151pgprot_t vm_get_page_prot(unsigned long vm_flags);
2152void vma_set_page_prot(struct vm_area_struct *vma);
2153#else
2154static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
2155{
2156 return __pgprot(0);
2157}
2158static inline void vma_set_page_prot(struct vm_area_struct *vma)
2159{
2160 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2161}
2162#endif
2163
2164#ifdef CONFIG_NUMA_BALANCING
2165unsigned long change_prot_numa(struct vm_area_struct *vma,
2166 unsigned long start, unsigned long end);
2167#endif
2168
2169struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
2170int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
2171 unsigned long pfn, unsigned long size, pgprot_t);
2172int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
2173int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2174 unsigned long pfn);
2175int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2176 unsigned long pfn, pgprot_t pgprot);
2177int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2178 pfn_t pfn);
2179int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);

struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int foll_flags,
			      unsigned int *page_mask);

static inline struct page *follow_page(struct vm_area_struct *vma,
		unsigned long address, unsigned int foll_flags)
{
	unsigned int unused_page_mask;
	return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
}
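
/*
 * Illustrative sketch, not from the original header: callers must hold
 * mmap_sem; with FOLL_GET the returned page carries a reference that
 * must later be dropped with put_page():
 *
 *	down_read(&mm->mmap_sem);
 *	page = follow_page(vma, addr, FOLL_GET);
 *	up_read(&mm->mmap_sem);
 */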

#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_TOUCH	0x02	/* mark page accessed */
#define FOLL_GET	0x04	/* do get_page on page */
#define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
#define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
#define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
				 * and return without waiting upon it */
#define FOLL_POPULATE	0x40	/* fault in page */
#define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
#define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
#define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
#define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
#define FOLL_TRIED	0x800	/* a retry, previous pass started an IO */
#define FOLL_MLOCK	0x1000	/* lock present pages */
#define FOLL_REMOTE	0x2000	/* we are working on non-current tsk/mm */

typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
			void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
			       unsigned long size, pte_fn_t fn, void *data);

#ifdef CONFIG_PAGE_POISONING
extern bool page_poisoning_enabled(void);
extern void kernel_poison_pages(struct page *page, int numpages, int enable);
extern bool page_is_poisoned(struct page *page);
#else
static inline bool page_poisoning_enabled(void) { return false; }
static inline void kernel_poison_pages(struct page *page, int numpages,
					int enable) { }
static inline bool page_is_poisoned(struct page *page) { return false; }
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
extern bool _debug_pagealloc_enabled;
extern void __kernel_map_pages(struct page *page, int numpages, int enable);

static inline bool debug_pagealloc_enabled(void)
{
	return _debug_pagealloc_enabled;
}

/*
 * Map (enable != 0) or unmap (enable == 0) a range of pages in the
 * kernel direct mapping, so stray accesses to unmapped pages fault.
 */
static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!debug_pagealloc_enabled())
		return;

	__kernel_map_pages(page, numpages, enable);
}
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif /* CONFIG_HIBERNATION */
#else
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
#endif /* CONFIG_HIBERNATION */
static inline bool debug_pagealloc_enabled(void)
{
	return false;
}
#endif
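
/*
 * Illustrative note, not from the original header: with DEBUG_PAGEALLOC
 * the page allocator unmaps pages as they are freed and maps them again
 * on allocation, so use-after-free bugs fault immediately:
 *
 *	kernel_map_pages(page, 1 << order, 0);	(on free: unmap)
 *	kernel_map_pages(page, 1 << order, 1);	(on alloc: map back in)
 */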

#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
extern int in_gate_area_no_mm(unsigned long addr);
extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
#else
static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}
static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}
#endif	/* __HAVE_ARCH_GATE_AREA */

#ifdef CONFIG_SYSCTL
extern int sysctl_drop_caches;
int drop_caches_sysctl_handler(struct ctl_table *, int,
			       void __user *, size_t *, loff_t *);
#endif

void drop_slab(void);
void drop_slab_node(int nid);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char *arch_vma_name(struct vm_area_struct *vma);
void print_vma_addr(char *prefix, unsigned long rip);

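/*
 * The helpers below are used by SPARSEMEM (and SPARSEMEM_VMEMMAP) to
 * allocate and wire up the struct page arrays backing each memory
 * section; the implementations live in mm/sparse.c and
 * mm/sparse-vmemmap.c.
 */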
void sparse_mem_maps_populate_node(struct page **map_map,
				   unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count,
				   int nodeid);

struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
struct vmem_altmap;
void *__vmemmap_alloc_block_buf(unsigned long size, int node,
				struct vmem_altmap *altmap);
static inline void *vmemmap_alloc_block_buf(unsigned long size, int node)
{
	return __vmemmap_alloc_block_buf(size, node, NULL);
}

void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
			       int node);
int vmemmap_populate(unsigned long start, unsigned long end, int node);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end);
#endif
void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
				  unsigned long size);

enum mf_flags {
	MF_COUNT_INCREASED = 1 << 0,
	MF_ACTION_REQUIRED = 1 << 1,
	MF_MUST_KILL = 1 << 2,
	MF_SOFT_OFFLINE = 1 << 3,
};
extern int memory_failure(unsigned long pfn, int trapno, int flags);
extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int get_hwpoison_page(struct page *page);
#define put_hwpoison_page(page)	put_page(page)
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t num_poisoned_pages;
extern int soft_offline_page(struct page *page, int flags);
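
/*
 * Illustrative note, not from the original header: machine-check and
 * APEI/GHES handlers report a bad pfn with memory_failure(), or queue it
 * with memory_failure_queue() from contexts that cannot sleep;
 * soft_offline_page() is the non-destructive variant driven from sysfs.
 */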

/*
 * Error handlers for various types of pages.
 */
enum mf_result {
	MF_IGNORED,	/* Error: cannot be handled */
	MF_FAILED,	/* Error: handling failed */
	MF_DELAYED,	/* Will be handled later */
	MF_RECOVERED,	/* Successfully recovered */
};

enum mf_action_page_type {
	MF_MSG_KERNEL,
	MF_MSG_KERNEL_HIGH_ORDER,
	MF_MSG_SLAB,
	MF_MSG_DIFFERENT_COMPOUND,
	MF_MSG_POISONED_HUGE,
	MF_MSG_HUGE,
	MF_MSG_FREE_HUGE,
	MF_MSG_UNMAP_FAILED,
	MF_MSG_DIRTY_SWAPCACHE,
	MF_MSG_CLEAN_SWAPCACHE,
	MF_MSG_DIRTY_MLOCKED_LRU,
	MF_MSG_CLEAN_MLOCKED_LRU,
	MF_MSG_DIRTY_UNEVICTABLE_LRU,
	MF_MSG_CLEAN_UNEVICTABLE_LRU,
	MF_MSG_DIRTY_LRU,
	MF_MSG_CLEAN_LRU,
	MF_MSG_TRUNCATED_LRU,
	MF_MSG_BUDDY,
	MF_MSG_BUDDY_2ND,
	MF_MSG_UNKNOWN,
};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
			    unsigned long addr,
			    unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src,
				unsigned long addr, struct vm_area_struct *vma,
				unsigned int pages_per_huge_page);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

extern struct page_ext_operations debug_guardpage_ops;
extern struct page_ext_operations page_poisoning_ops;

#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
extern bool _debug_guardpage_enabled;

static inline unsigned int debug_guardpage_minorder(void)
{
	return _debug_guardpage_minorder;
}

static inline bool debug_guardpage_enabled(void)
{
	return _debug_guardpage_enabled;
}

static inline bool page_is_guard(struct page *page)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return false;

	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))	/* page_ext allocation can fail */
		return false;

	return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool debug_guardpage_enabled(void) { return false; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif /* CONFIG_DEBUG_PAGEALLOC */

#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else
static inline void setup_nr_node_ids(void) {}
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */