/* internal.h: mm/ internal definitions */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/tracepoint-defs.h>

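/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about reclaim behaviour when constructing the gfp_mask for page
 * allocations made inside the MM.
 */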
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
                        __GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
                        __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
                        __GFP_ATOMIC)

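/* The GFP flags allowed during early boot */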
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

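/* Control allocation cpuset and node placement constraints */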
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

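/* Do not use these with a slab allocator */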
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

void page_writeback_init(void);

vm_fault_t do_swap_page(struct vm_fault *vmf);

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
                unsigned long floor, unsigned long ceiling);

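/*
 * madvise() operations that act on LRU pages (e.g. MADV_COLD, MADV_PAGEOUT,
 * MADV_DONTNEED, MADV_FREE) must skip mlocked, hugetlb and PFN-mapped VMAs.
 */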
static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
{
        return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
}

void unmap_page_range(struct mmu_gather *tlb,
                        struct vm_area_struct *vma,
                        unsigned long addr, unsigned long end,
                        struct zap_details *details);

void do_page_cache_ra(struct readahead_control *, unsigned long nr_to_read,
                unsigned long lookahead_size);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
                struct file *file, pgoff_t index, unsigned long nr_to_read)
{
        DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
        force_page_cache_ra(&ractl, nr_to_read);
}

unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
                pgoff_t end, struct pagevec *pvec, pgoff_t *indices);

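/**
 * page_evictable - test whether a page is evictable
 * @page: the page to test
 *
 * Test whether @page is evictable -- i.e. should be placed on the
 * active/inactive LRU lists, vs the unevictable list.
 *
 * Reasons page might not be evictable:
 * (1) page's mapping marked unevictable
 * (2) page is part of an mlocked VMA
 */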
static inline bool page_evictable(struct page *page)
{
        bool ret;

        /* Prevent address_space of inode and swap cache from being freed */
        rcu_read_lock();
        ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
        rcu_read_unlock();
        return ret;
}

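/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */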
static inline void set_page_refcounted(struct page *page)
{
        VM_BUG_ON_PAGE(PageTail(page), page);
        VM_BUG_ON_PAGE(page_ref_count(page), page);
        set_page_count(page, 1);
}

extern unsigned long highest_memmap_pfn;

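/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */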
#define MAX_RECLAIM_RETRIES 16

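/* in mm/vmscan.c: */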
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

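/* in mm/rmap.c: */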
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

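/* in mm/memcontrol.c: */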
extern bool cgroup_memory_nokmem;

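/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 */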
struct alloc_context {
        struct zonelist *zonelist;
        nodemask_t *nodemask;
        struct zoneref *preferred_zoneref;
        int migratetype;

        /*
         * highest_zoneidx represents the highest usable zone index for this
         * allocation request. It is also used by reclaim/compaction to limit
         * the target zone, since zones above this index cannot satisfy the
         * allocation.
         */
        enum zone_type highest_zoneidx;
        bool spread_dirty_pages;
};

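/*
 * Locate the struct page for both the matching buddy in our pair (buddy1)
 * and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */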
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
        return page_pfn ^ (1 << order);
}

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
                                unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
                                unsigned long end_pfn, struct zone *zone)
{
        if (zone->contiguous)
                return pfn_to_page(start_pfn);

        return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
                                    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
                                unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
                                gfp_t gfp_flags);
extern int user_min_free_kbytes;

extern void free_unref_page(struct page *page, unsigned int order);
extern void free_unref_page_list(struct list_head *list);

extern void zone_pcp_update(struct zone *zone, int cpu_online);
extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);

extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
                          phys_addr_t min_addr,
                          int nid, bool exact_nid);

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

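/*
 * in mm/compaction.c
 *
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn.
 */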
struct compact_control {
        struct list_head freepages;     /* List of free pages to migrate to */
        struct list_head migratepages;  /* List of pages being migrated */
        unsigned int nr_freepages;      /* Number of isolated free pages */
        unsigned int nr_migratepages;   /* Number of pages to migrate */
        unsigned long free_pfn;         /* isolate_freepages search base */

        /*
         * Acts as an in/out parameter to page isolation for migration.
         * isolate_migratepages uses it as a search base.
         * isolate_migratepages_block will update the value to the next pfn
         * after the last isolated one.
         */
        unsigned long migrate_pfn;
        unsigned long fast_start_pfn;   /* a pfn to start linear scan from */
        struct zone *zone;
        unsigned long total_migrate_scanned;
        unsigned long total_free_scanned;
        unsigned short fast_search_fail;/* failures to use free list searches */
        short search_order;             /* order to start a fast search at */
        const gfp_t gfp_mask;           /* gfp mask of a direct compactor */
        int order;                      /* order a direct compactor needs */
        int migratetype;                /* migratetype of direct compactor */
        const unsigned int alloc_flags; /* alloc flags of a direct compactor */
        const int highest_zoneidx;      /* zone index of a direct compactor */
        enum migrate_mode mode;         /* Async or sync migration mode */
        bool ignore_skip_hint;          /* Scan blocks even if marked skip */
        bool no_set_skip_hint;          /* Don't mark blocks for skipping */
        bool ignore_block_suitable;     /* Scan blocks considered unsuitable */
        bool direct_compaction;         /* False from kcompactd or proactive/sysctl compaction */
        bool proactive_compaction;      /* kcompactd proactive compaction */
        bool whole_zone;                /* Whole zone should/has been scanned */
        bool contended;                 /* Signal lock contention or need_resched() */
        bool rescan;                    /* Rescanning the same pageblock */
        bool alloc_contig;              /* alloc_contig_range allocation */
};

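/*
 * Used in direct compaction when a page should be captured from the
 * freelists for the compacting allocation as soon as a suitably sized
 * one is isolated.
 */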
struct capture_control {
        struct compact_control *cc;
        struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
                        unsigned long start_pfn, unsigned long end_pfn);
int
isolate_migratepages_range(struct compact_control *cc,
                           unsigned long low_pfn, unsigned long end_pfn);
#endif
int find_suitable_fallback(struct free_area *area, unsigned int order,
                        int migratetype, bool only_stealable, bool *can_steal);

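/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */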
static inline unsigned int buddy_order(struct page *page)
{
        /* PageBuddy() must be checked by the caller */
        return page_private(page);
}

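/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */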
#define buddy_order_unsafe(page)        READ_ONCE(page_private(page))

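/*
 * These three helpers classify VMAs for virtual memory accounting.
 *
 * Executable code area - executable, not writable, not stack.
 */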
static inline bool is_exec_mapping(vm_flags_t flags)
{
        return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

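/*
 * Stack area - automatically grows in one direction (VM_STACK).
 */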
static inline bool is_stack_mapping(vm_flags_t flags)
{
        return (flags & VM_STACK) == VM_STACK;
}

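/* Data area - private, writable, not stack. */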
static inline bool is_data_mapping(vm_flags_t flags)
{
        return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

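/* mm/util.c */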
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev);
void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);

#ifdef CONFIG_MMU
extern long populate_vma_page_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end, int *locked);
extern long faultin_vma_page_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end,
                                   bool write, int *locked);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
                        unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
        munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

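/*
 * must be called with vma's mmap_lock held for read or write, and page locked.
 */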
extern void mlock_vma_page(struct page *page);
extern unsigned int munlock_vma_page(struct page *page);

extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
                              unsigned long len);

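/*
 * Clear the page's PageMlocked().  This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */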
extern void clear_page_mlock(struct page *page);

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

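/*
 * At what user virtual address is page expected in vma?
 * Returns -EFAULT if all of the page is outside the range of vma.
 * If page is a compound head, the entire compound page is considered.
 */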
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
        pgoff_t pgoff;
        unsigned long address;

        VM_BUG_ON_PAGE(PageKsm(page), page);
        pgoff = page_to_pgoff(page);
        if (pgoff >= vma->vm_pgoff) {
                address = vma->vm_start +
                        ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
                /* Check for address beyond vma (or wrapped through 0?) */
                if (address < vma->vm_start || address >= vma->vm_end)
                        address = -EFAULT;
        } else if (PageHead(page) &&
                   pgoff + compound_nr(page) - 1 >= vma->vm_pgoff) {
                /* Test above avoids possibility of wrap to 0 on 32-bit */
                address = vma->vm_start;
        } else {
                address = -EFAULT;
        }
        return address;
}

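/*
 * Then at what user virtual address will none of the page be found in vma?
 * Assumes that vma_address() already returned a good starting address.
 * If page is a compound head, the entire compound page is considered.
 */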
static inline unsigned long
vma_address_end(struct page *page, struct vm_area_struct *vma)
{
        pgoff_t pgoff;
        unsigned long address;

        VM_BUG_ON_PAGE(PageKsm(page), page);
        pgoff = page_to_pgoff(page) + compound_nr(page);
        address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
        /* Check for address beyond vma (or wrapped through 0?) */
        if (address < vma->vm_start || address > vma->vm_end)
                address = vma->vm_end;
        return address;
}

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
                                                    struct file *fpin)
{
        int flags = vmf->flags;

        if (fpin)
                return fpin;

        /*
         * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
         * anything, so we only pin the file and drop the mmap_lock if only
         * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
         */
        if (fault_flag_allow_retry_first(flags) &&
            !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
                fpin = get_file(vmf->vma->vm_file);
                mmap_read_unlock(vmf->vma->vm_mm);
        }
        return fpin;
}

#else /* !CONFIG_MMU */
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
#endif /* !CONFIG_MMU */

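/*
 * Return the mem_map entry for a given offset from a base page, handling
 * the case where base + offset crosses a MAX_ORDER_NR_PAGES boundary and
 * the mem_map is not contiguous (e.g. SPARSEMEM without VMEMMAP).
 */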
static inline struct page *mem_map_offset(struct page *base, int offset)
{
        if (unlikely(offset >= MAX_ORDER_NR_PAGES))
                return nth_page(base, offset);
        return base + offset;
}

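/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'.  Handle any discontiguity in the mem_map.
 */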
static inline struct page *mem_map_next(struct page *iter,
                                        struct page *base, int offset)
{
        if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
                unsigned long pfn = page_to_pfn(base) + offset;
                if (!pfn_valid(pfn))
                        return NULL;
                return pfn_to_page(pfn);
        }
        return iter + 1;
}

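/* Memory initialisation debug and verification */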
enum mminit_level {
        MMINIT_WARNING,
        MMINIT_VERIFY,
        MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
        if (level < mminit_loglevel) { \
                if (level <= MMINIT_WARNING) \
                        pr_warn("mminit::" prefix " " fmt, ##arg); \
                else \
                        printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
        } \
} while (0)

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
                                const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif

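/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */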
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
                                unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
                                unsigned long *end_pfn)
{
}
#endif

#define NODE_RECLAIM_NOSCAN     -2
#define NODE_RECLAIM_FULL       -1
#define NODE_RECLAIM_SOME       0
#define NODE_RECLAIM_SUCCESS    1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
                                unsigned int order)
{
        return NODE_RECLAIM_NOSCAN;
}
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
{
        return NUMA_NO_NODE;
}
#endif

extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
        unsigned long, unsigned long,
        unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
                                            struct list_head *page_list);

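/* The ALLOC_WMARK bits are used as an index to zone->watermark */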
#define ALLOC_WMARK_MIN         WMARK_MIN
#define ALLOC_WMARK_LOW         WMARK_LOW
#define ALLOC_WMARK_HIGH        WMARK_HIGH
#define ALLOC_NO_WATERMARKS     0x04    /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK        (ALLOC_NO_WATERMARKS-1)

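/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
 * cannot assume a reduced access to memory reserves is sufficient for
 * !MMU.
 */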
#ifdef CONFIG_MMU
#define ALLOC_OOM               0x08
#else
#define ALLOC_OOM               ALLOC_NO_WATERMARKS
#endif

#define ALLOC_HARDER            0x10    /* try to alloc harder */
#define ALLOC_HIGH              0x20    /* __GFP_HIGH set */
#define ALLOC_CPUSET            0x40    /* check for correct cpuset */
#define ALLOC_CMA               0x80    /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT        0x100   /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT        0x0
#endif
#define ALLOC_KSWAPD            0x800   /* allow waking of kswapd */

enum ttu_flags;
struct tlbflush_unmap_batch;

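/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */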
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
        return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
        return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
        int nid;                /* preferred node id */
        nodemask_t *nmask;
        gfp_t gfp_mask;
};

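/*
 * mm/vmalloc.c
 */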
#ifdef CONFIG_MMU
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
                pgprot_t prot, struct page **pages, unsigned int page_shift);
#else
static inline
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
                pgprot_t prot, struct page **pages, unsigned int page_shift)
{
        return -EINVAL;
}
#endif

void vunmap_range_noflush(unsigned long start, unsigned long end);

int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
                      unsigned long addr, int page_nid, int *flags);

#endif  /* __MM_INTERNAL_H */