/* SPDX-License-Identifier: GPL-2.0-only */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/tracepoint-defs.h>

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_ATOMIC)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
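
/*
 * Illustrative sketch (not part of the upstream header): a slab-style
 * allocation path would typically reject the BUG bits and narrow the
 * caller's mask to the reclaim and constraint bits before asking the page
 * allocator for backing pages, along the lines of:
 *
 *	BUG_ON(flags & GFP_SLAB_BUG_MASK);
 *	flags &= GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK;
 */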

void page_writeback_init(void);

vm_fault_t do_swap_page(struct vm_fault *vmf);

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

/*
 * Can the LRU-oriented madvise hints operate on this VMA? Not on mlocked,
 * hugetlb or PFN-mapped areas.
 */
static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
}

void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details);

void do_page_cache_ra(struct readahead_control *, unsigned long nr_to_read,
		unsigned long lookahead_size);
void force_page_cache_ra(struct readahead_control *, struct file_ra_state *,
		unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, mapping, index);
	force_page_cache_ra(&ractl, &file->f_ra, nr_to_read);
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t index);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t index);

/**
 * page_evictable - test whether a page is evictable
 * @page: the page to test
 *
 * Test whether page is evictable--i.e., should be placed on active/inactive
 * lists vs unevictable list.
 *
 * Reasons page might not be evictable:
 * (1) page's mapping marked unevictable
 * (2) page is part of an mlocked VMA
 *
 */
static inline bool page_evictable(struct page *page)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
	rcu_read_unlock();
	return ret;
}
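
/*
 * Illustrative sketch (not upstream code): LRU putback decides between the
 * unevictable and regular LRU lists based on this test, roughly:
 *
 *	if (page_evictable(page))
 *		lru_cache_add(page);	// back onto the normal LRUs
 *	else
 *		add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
 */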

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}
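
/*
 * Illustrative sketch (not upstream code): the page allocator hands out
 * fresh pages with a single reference, so a post-allocation path does,
 * in essence:
 *
 *	set_page_private(page, 0);
 *	set_page_refcounted(page);	// first reference for the new owner
 */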

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way to move forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/rmap.c:
 */
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages_nodemask() and then never change.
 *
 * zonelist, preferred_zoneref and spread_dirty_pages are set first in
 * __alloc_pages_nodemask() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents the highest usable zone index of
	 * the allocation request. Due to the nature of the zones,
	 * memory on a lower zone than highest_zoneidx will be
	 * protected by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone, since zones above it are protected by
	 * their lowmem reserves.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}
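
/*
 * Illustrative sketch (not upstream code): during a buddy merge step the
 * buddy's struct page is located from the PFN delta, along the lines of:
 *
 *	buddy_pfn = __find_buddy_pfn(pfn, order);
 *	buddy = page + (buddy_pfn - pfn);
 *	if (page_is_buddy(page, buddy, order)) {
 *		// combine the pair into a single order+1 block
 *	}
 */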

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
					unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern int user_min_free_kbytes;

extern void zone_pcp_update(struct zone *zone);
extern void zone_pcp_reset(struct zone *zone);
#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/ */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock or sched contention */
	bool rescan;			/* Rescanning the same pageblock */
	bool alloc_contig;		/* alloc_contig_range allocation */
};

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one of a certain size is available.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
unsigned long
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

#endif /* CONFIG_COMPACTION || CONFIG_CMA */

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * A caller that does not hold the zone lock must otherwise guarantee that
 * the page cannot be allocated or merged in parallel.
 */
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
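
/*
 * Illustrative sketch (not upstream code): the intended usage pattern is to
 * snapshot the racy value once and range-check it before acting on it, e.g.
 * when a PFN scanner wants to skip over a free block:
 *
 *	if (PageBuddy(page)) {
 *		unsigned long order = buddy_order_unsafe(page);
 *
 *		if (order < MAX_ORDER)
 *			pfn += (1UL << order) - 1;	// skip the free block
 *	}
 */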

/*
 * COW mappings are private and at least possibly writable: VM_MAYWRITE is
 * set while VM_SHARED is clear.
 */
static inline bool is_cow_mapping(vm_flags_t flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}
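
/*
 * For instance (illustrative only): a MAP_PRIVATE mapping carries
 * VM_MAYWRITE but not VM_SHARED, so it classifies as COW, while a
 * MAP_SHARED one never does:
 *
 *	is_cow_mapping(VM_MAYREAD | VM_MAYWRITE);	// true
 *	is_cow_mapping(VM_SHARED | VM_MAYWRITE);	// false
 */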

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area - automatically grows in one direction (VM_GROWSUP or
 * VM_GROWSDOWN VMAs are both marked VM_STACK).
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return (flags & VM_STACK) == VM_STACK;
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
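
/*
 * Illustrative sketch (not upstream code): the flags of a typical private
 * read-write anonymous mapping classify as a data mapping:
 *
 *	vm_flags_t flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
 *
 *	is_data_mapping(flags);	// true: writable, not shared, not stack
 *	is_cow_mapping(flags);	// true: private and VM_MAYWRITE
 *	is_exec_mapping(flags);	// false: VM_EXEC not set
 */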

/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev);
void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);

#ifdef CONFIG_MMU
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * must be called with vma's mmap_lock held for read or write, and page locked.
 */
extern void mlock_vma_page(struct page *page);
extern unsigned int munlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked().  This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void clear_page_mlock(struct page *page);

/*
 * mlock_migrate_page - called only from migrate_misplaced_transhuge_page()
 * (because that does not go through the full procedure of migration ptes):
 * to migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		int nr_pages = thp_nr_pages(page);

		/* Holding pmd lock, no change in irq context: __mod is safe */
		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
		SetPageMlocked(newpage);
		__mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
	}
}

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * At what user virtual address is page expected in @vma?
 */
static inline unsigned long
__vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page_to_pgoff(page);
	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}

static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	unsigned long start, end;

	start = __vma_address(page, vma);
	end = start + thp_size(page) - PAGE_SIZE;

	/* page should be within @vma mapping range */
	VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);

	return max(start, vma->vm_start);
}
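
/*
 * Worked example (illustrative, with made-up numbers): for a file page at
 * pgoff 5 in a VMA with vm_start = 0x7f0000000000 and vm_pgoff = 2,
 * __vma_address() returns 0x7f0000000000 + ((5 - 2) << PAGE_SHIFT),
 * i.e. vm_start + 0x3000 with 4K pages.
 */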

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we only pin the file and drop the mmap_lock if only
	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		mmap_read_unlock(vmf->vma->vm_mm);
	}
	return fpin;
}
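
/*
 * Illustrative sketch (not upstream code) of the caller-side pattern in a
 * fault handler: pin the file and drop mmap_lock before blocking on I/O,
 * then report VM_FAULT_RETRY if the lock was dropped:
 *
 *	struct file *fpin = NULL;
 *
 *	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 *	// ... start readahead / wait for the page ...
 *	if (fpin) {
 *		fput(fpin);
 *		return VM_FAULT_RETRY;
 *	}
 */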

#else /* !CONFIG_MMU */
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }

#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'.  Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return nth_page(base, offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'.  Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}
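
/*
 * Illustrative sketch (not upstream code): walking every subpage of a
 * gigantic page with the iterator above, advancing after each body run:
 *
 *	struct page *p = base;
 *	int i;
 *
 *	for (i = 0; i < nr_pages; i++, p = mem_map_next(p, base, i)) {
 *		// operate on subpage p
 *	}
 */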

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg); \
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)
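
/*
 * Illustrative sketch (not upstream code): a typical call site logs at a
 * verbosity level under a short prefix, e.g.:
 *
 *	mminit_dprintk(MMINIT_VERIFY, "pageflags",
 *		       "section %d node %d zone %d\n", section, node, zone);
 */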

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
#endif

extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
        unsigned long, unsigned long,
        unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list);

/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
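
/*
 * Illustrative sketch (not upstream code): the low bits select which
 * watermark an allocation is checked against, so a zone's target can be
 * fetched along the lines of:
 *
 *	unsigned long mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
 */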

/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
 * cannot assume a reduced access to memory reserves is sufficient for
 * !MMU
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_HARDER		 0x10 /* try to alloc harder */
#define ALLOC_HIGH		 0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
#define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	  0x0
#endif
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd */

enum ttu_flags;
struct tlbflush_unmap_batch;

/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
};

#endif	/* __MM_INTERNAL_H */