/* SPDX-License-Identifier: GPL-2.0 */
/* internal.h: mm/ internal definitions */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/tracepoint-defs.h>

struct folio_batch;
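
/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO and FS based reclaim when allocating from contexts such
 * as the page cache.
 */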
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_ATOMIC|__GFP_NOLOCKDEP)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

void page_writeback_init(void);

static inline void *folio_raw_mapping(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
}

void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
						int nr_throttled);
static inline void acct_reclaim_writeback(struct folio *folio)
{
	pg_data_t *pgdat = folio_pgdat(folio);
	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);

	if (nr_throttled)
		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
}

static inline void wake_throttle_isolated(pg_data_t *pgdat)
{
	wait_queue_head_t *wqh;

	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
	if (waitqueue_active(wqh))
		wake_up(wqh);
}

vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);
void deactivate_file_folio(struct folio *folio);

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);

struct zap_details;
void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details);

void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
		unsigned int order);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
	force_page_cache_ra(&ractl, nr_to_read);
}

unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
		loff_t end);
long invalidate_inode_page(struct page *page);
unsigned long invalidate_mapping_pagevec(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_pagevec);
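
/**
 * folio_evictable - Test whether a folio is evictable.
 * @folio: The folio to test.
 *
 * Test whether @folio is evictable -- i.e., should be placed on
 * active/inactive lists vs unevictable list.
 *
 * Reasons folio might not be evictable:
 * 1. folio's mapping marked unevictable
 * 2. One of the pages in the folio is part of an mlocked VMA
 */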
static inline bool folio_evictable(struct folio *folio)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(folio_mapping(folio)) &&
			!folio_test_mlocked(folio);
	rcu_read_unlock();
	return ret;
}

static inline bool page_evictable(struct page *page)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
	rcu_read_unlock();
	return ret;
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					     unsigned long size, pgprot_t prot);

/*
 * in mm/vmscan.c:
 */
int isolate_lru_page(struct page *page);
int folio_isolate_lru(struct folio *folio);
void putback_lru_page(struct page *page);
void folio_putback_lru(struct folio *folio);
extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);

/*
 * in mm/rmap.c:
 */
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
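
/*
 * in mm/page_alloc.c
 */

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype, highest_zoneidx and spread_dirty_pages are
 * initialized once in __alloc_pages() and then never change; the
 * remaining fields are set for the fast path and may be updated in
 * __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */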
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents the highest usable zone index of
	 * the allocation request. Memory in zones below highest_zoneidx
	 * is protected by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone, since zones above this index cannot be used
	 * for this allocation request.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};
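
/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */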
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
					unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern int user_min_free_kbytes;

extern void free_unref_page(struct page *page, unsigned int order);
extern void free_unref_page_list(struct list_head *list);

extern void zone_pcp_update(struct zone *zone, int cpu_online);
extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);

extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr,
			  int nid, bool exact_nid);

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
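/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */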
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */

	/*
	 * Acts as an in/out parameter to page isolation for migration.
	 * isolate_migratepages uses it as a search base.
	 * isolate_migratepages_block will update the value to the next pfn
	 * after the last isolated one.
	 */
	unsigned long migrate_pfn;
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* Compaction requested by an allocation */
	bool proactive_compaction;	/* Background proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock contention */
	bool rescan;			/* Rescanning the same pageblock */
	bool alloc_contig;		/* alloc_contig_range allocation */
};

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one of a certain order is made available.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
int
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);
#endif
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);
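
/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */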
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area - automatically grows in one direction
 * (VM_GROWSUP / VM_GROWSDOWN VMAs)
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return (flags & VM_STACK) == VM_STACK;
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev);
void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);
struct anon_vma *folio_anon_vma(struct folio *folio);

#ifdef CONFIG_MMU
void unmap_mapping_folio(struct folio *folio);
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked);
extern long faultin_vma_page_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end,
				   bool write, int *locked);
extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
			      unsigned long len);
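
/*
 * The mlock_vma_*() and munlock_vma_*() helpers below should be called
 * with the vma's mmap_lock held for read or write, under the page table
 * lock for the pte/pmd being added or removed.
 *
 * @compound is used to include pmd mappings of THPs, but filter out
 * pte mappings of THPs, which cannot be consistently counted: a pte
 * mapping of the THP head cannot be distinguished by the page alone.
 */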
void mlock_folio(struct folio *folio);
static inline void mlock_vma_folio(struct folio *folio,
			struct vm_area_struct *vma, bool compound)
{
	/*
	 * Only mlock folios of plain VM_LOCKED VMAs: VM_SPECIAL VMAs are
	 * excluded from mlock accounting, and a pte-mapped THP tail cannot
	 * be counted consistently, so large folios are only handled when
	 * mapped as a whole (@compound).
	 */
	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED) &&
	    (compound || !folio_test_large(folio)))
		mlock_folio(folio);
}

static inline void mlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound)
{
	mlock_vma_folio(page_folio(page), vma, compound);
}

void munlock_page(struct page *page);
static inline void munlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound)
{
	if (unlikely(vma->vm_flags & VM_LOCKED) &&
	    (compound || !PageTransCompound(page)))
		munlock_page(page);
}
void mlock_new_page(struct page *page);
bool need_mlock_page_drain(int cpu);
void mlock_page_drain_local(void);
void mlock_page_drain_remote(int cpu);

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * At what user virtual address is page expected in vma?
 * Returns -EFAULT if all of the page is outside the range of vma.
 * If page is a compound head, the entire compound page is considered.
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff;
	unsigned long address;

	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
	pgoff = page_to_pgoff(page);
	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Check for address beyond vma (or wrapped through 0?) */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (PageHead(page) &&
		   pgoff + compound_nr(page) - 1 >= vma->vm_pgoff) {
		/* Test above avoids possibility of wrap to 0 on 32-bit */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}
	return address;
}

/*
 * Then at what user virtual address will none of the range be found in vma?
 * Assumes that vma_address() already returned a good starting address.
 */
static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	pgoff_t pgoff;
	unsigned long address;

	/* Common case, plus ->pgoff is invalid for KSM */
	if (pvmw->nr_pages == 1)
		return pvmw->address + PAGE_SIZE;

	pgoff = pvmw->pgoff + pvmw->nr_pages;
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	/* Check for address beyond vma (or wrapped through 0?) */
	if (address < vma->vm_start || address > vma->vm_end)
		address = vma->vm_end;
	return address;
}

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we only pin the file and drop the mmap_lock if only
	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		mmap_read_unlock(vmf->vma->vm_mm);
	}
	return fpin;
}
#else /* !CONFIG_MMU */
static inline void unmap_mapping_folio(struct folio *folio) { }
static inline void mlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound) { }
static inline void munlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound) { }
static inline void mlock_new_page(struct page *page) { }
static inline bool need_mlock_page_drain(int cpu) { return false; }
static inline void mlock_page_drain_local(void) { }
static inline void mlock_page_drain_remote(int cpu) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'.  Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return nth_page(base, offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'.  Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg); \
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	return NUMA_NO_NODE;
}
#endif

extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
		unsigned long, unsigned long,
		unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list);

/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
 * cannot assume a reduced access to memory reserves is sufficient for
 * !MMU
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	0x0
#endif
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd */

enum ttu_flags;
struct tlbflush_unmap_batch;

/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
};

/*
 * mm/vmalloc.c
 */
#ifdef CONFIG_MMU
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
                pgprot_t prot, struct page **pages, unsigned int page_shift);
#else
static inline
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
                pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	return -EINVAL;
}
#endif

void vunmap_range_noflush(unsigned long start, unsigned long end);

int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
		      unsigned long addr, int page_nid, int *flags);

void free_zone_device_page(struct page *page);

/*
 * mm/gup.c
 */
struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);

DECLARE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);

#endif	/* __MM_INTERNAL_H */