/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/tracepoint-defs.h>

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_ATOMIC)
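
/*
 * Illustrative only (not part of this header): callers that sanitize an
 * incoming gfp mask typically keep just the reclaim/constraint-relevant
 * bits, as the slab allocators do before allocating a new slab, roughly:
 *
 *	flags &= (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK);
 */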

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

void page_writeback_init(void);

vm_fault_t do_swap_page(struct vm_fault *vmf);

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

static inline bool can_madv_dontneed_vma(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
}

void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details);

extern unsigned int __do_page_cache_readahead(struct address_space *mapping,
		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
		unsigned long lookahead_size);

/*
 * Submit IO for the read-ahead request in file_ra_state.
 */
static inline unsigned long ra_submit(struct file_ra_state *ra,
		struct address_space *mapping, struct file *filp)
{
	return __do_page_cache_readahead(mapping, filp,
					ra->start, ra->size, ra->async_size);
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/rmap.c:
 */
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and high_zoneidx are initialized only once in
 * __alloc_pages_nodemask() and then never change.
 *
 * zonelist, preferred_zoneref and the derived classzone_idx are set first
 * in __alloc_pages_nodemask() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;
	enum zone_type high_zoneidx;
	bool spread_dirty_pages;
};

#define ac_classzone_idx(ac) zonelist_zone_idx(ac->preferred_zoneref)

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}
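
/*
 * Illustrative only (hypothetical caller, not part of this header): the
 * buddy merge loop in the page allocator pairs this helper with a
 * page_is_buddy() check before coalescing, roughly:
 *
 *	buddy_pfn = __find_buddy_pfn(pfn, order);
 *	buddy = page + (buddy_pfn - pfn);
 *	if (!page_is_buddy(page, buddy, order))
 *		goto done_merging;
 */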

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
					unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern int user_min_free_kbytes;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn and migrate_pfn meet.
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int classzone_idx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd, true for direct compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock or sched contention */
	bool rescan;			/* Rescanning the same pageblock */
};

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one of the relevant order is freed during the free path.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
unsigned long
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

#endif

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use page_order_unsafe() below.
 */
static inline unsigned int page_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like page_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define page_order_unsafe(page)		READ_ONCE(page_private(page))
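
/*
 * Illustrative only (hypothetical caller, not part of this header): the
 * compaction scanner copies the unsafe order into a local variable and
 * range-checks it before trusting it, roughly:
 *
 *	unsigned long freepage_order = page_order_unsafe(page);
 *
 *	if (freepage_order < MAX_ORDER)
 *		low_pfn += (1UL << freepage_order) - 1;
 */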

static inline bool is_cow_mapping(vm_flags_t flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area - automatically grows in one direction
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs. This is special because
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return (flags & VM_STACK) == VM_STACK;
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
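
/*
 * Illustrative only (hypothetical caller, not part of this header): VM
 * accounting dispatches on these helpers roughly as follows:
 *
 *	if (is_exec_mapping(flags))
 *		mm->exec_vm += npages;
 *	else if (is_stack_mapping(flags))
 *		mm->stack_vm += npages;
 *	else if (is_data_mapping(flags))
 *		mm->data_vm += npages;
 */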

/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent);

#ifdef CONFIG_MMU
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * must be called with vma's mmap_sem held for read or write, and page locked.
 */
extern void mlock_vma_page(struct page *page);
extern unsigned int munlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked().  This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void clear_page_mlock(struct page *page);

/*
 * mlock_migrate_page - called only from migrate_misplaced_transhuge_page()
 * (because that does not go through the full procedure of migration ptes):
 * to migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		int nr_pages = hpage_nr_pages(page);

		/* Holding pmd lock, no change in irq context: __mod is safe */
		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
		SetPageMlocked(newpage);
		__mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
	}
}

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * At what user virtual address is page expected in @vma?
 */
static inline unsigned long
__vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page_to_pgoff(page);
	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}

static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	unsigned long start, end;

	start = __vma_address(page, vma);
	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

	/* page should be within @vma mapping range */
	VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);

	return max(start, vma->vm_start);
}

#else /* !CONFIG_MMU */
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }

#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry for a given offset within the maximally aligned
 * gigantic page 'base'.  Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return nth_page(base, offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'.  Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}
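
/*
 * Illustrative only (hypothetical caller, not part of this header): walking
 * every subpage of a gigantic page, e.g. when clearing it, looks roughly like:
 *
 *	struct page *p = page;
 *	int i;
 *
 *	for (i = 0; i < pages_per_huge_page; i++, p = mem_map_next(p, page, i))
 *		clear_user_highpage(p, addr + i * PAGE_SIZE);
 */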

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg);	\
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)
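
/*
 * Illustrative only (hypothetical caller, not part of this header): memmap
 * initialisation code emits trace-level messages through this macro, e.g.
 *
 *	mminit_dprintk(MMINIT_TRACE, "memmap_init",
 *		       "Initialising map node %d zone %lu pfns %lu -> %lu\n",
 *		       nid, zone_id, start_pfn, end_pfn);
 */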

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
#endif

extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
        unsigned long, unsigned long,
        unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned long reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list);

/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
 * cannot assume a reduced access to memory reserves is sufficient for
 * !MMU
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_HARDER		 0x10 /* try to alloc harder */
#define ALLOC_HIGH		 0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
#define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	  0x0
#endif
#define ALLOC_KSWAPD		0x200 /* allow waking of kswapd */

enum ttu_flags;
struct tlbflush_unmap_batch;

/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);
extern struct page *alloc_new_node_page(struct page *page, unsigned long node);
#endif	/* __MM_INTERNAL_H */