/* SPDX-License-Identifier: GPL-2.0 */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/tracepoint-defs.h>

struct folio_batch;

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_ATOMIC|__GFP_NOLOCKDEP)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

/*
 * Different from WARN_ON_ONCE(), no warning will be issued
 * when we specify __GFP_NOWARN.
 */
#define WARN_ON_ONCE_GFP(cond, gfp)	({				\
	static bool __section(".data.once") __warned;			\
	int __ret_warn_once = !!(cond);					\
									\
	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
		__warned = true;					\
		WARN_ON(1);						\
	}								\
	unlikely(__ret_warn_once);					\
})
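
/*
 * Illustrative sketch (hypothetical caller, not from this file): an
 * allocation path that warns at most once, unless the caller opted out
 * of warnings with __GFP_NOWARN:
 *
 *	static struct page *try_alloc(gfp_t gfp, unsigned int order)
 *	{
 *		if (WARN_ON_ONCE_GFP(order >= MAX_ORDER, gfp))
 *			return NULL;
 *		return alloc_pages(gfp, order);
 *	}
 */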

void page_writeback_init(void);

/*
 * Return folio->mapping with the bottom PAGE_MAPPING_FLAGS bits (which
 * distinguish anon/KSM/movable mappings) masked off.
 */
static inline void *folio_raw_mapping(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
}

void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
						int nr_throttled);
static inline void acct_reclaim_writeback(struct folio *folio)
{
	pg_data_t *pgdat = folio_pgdat(folio);
	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);

	if (nr_throttled)
		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
}

static inline void wake_throttle_isolated(pg_data_t *pgdat)
{
	wait_queue_head_t *wqh;

	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
	if (waitqueue_active(wqh))
		wake_up(wqh);
}

vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);
void deactivate_file_folio(struct folio *folio);

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);

struct zap_details;
void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details);

void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
		unsigned int order);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
	force_page_cache_ra(&ractl, nr_to_read);
}
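
/*
 * Illustrative sketch (hypothetical caller): kicking off synchronous
 * readahead of the first 16 pages of a file's mapping. DEFINE_READAHEAD
 * above builds the on-stack readahead_control the same way:
 *
 *	force_page_cache_readahead(file->f_mapping, file, 0, 16);
 */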

unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
		loff_t end);
long invalidate_inode_page(struct page *page);
unsigned long invalidate_mapping_pagevec(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_pagevec);

/**
 * folio_evictable - Test whether a folio is evictable.
 * @folio: The folio to test.
 *
 * Test whether @folio is evictable -- i.e., should be placed on
 * active/inactive lists vs unevictable list.
 *
 * Reasons folio might not be evictable:
 * 1. folio's mapping marked unevictable
 * 2. folio is part of an mlocked VMA
 */
static inline bool folio_evictable(struct folio *folio)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(folio_mapping(folio)) &&
			!folio_test_mlocked(folio);
	rcu_read_unlock();
	return ret;
}

static inline bool page_evictable(struct page *page)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
	rcu_read_unlock();
	return ret;
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}
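
/*
 * Illustrative sketch (hypothetical allocator-side sequence): a freshly
 * prepared page leaves the buddy allocator with _refcount == 0; handing
 * it to its first user means raising that to one:
 *
 *	... prepare the newly allocated page ...
 *	set_page_refcounted(page);
 *	return page;
 */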

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/early_ioremap.c
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					unsigned long size, pgprot_t prot);

/*
 * in mm/vmscan.c:
 */
int isolate_lru_page(struct page *page);
int folio_isolate_lru(struct folio *folio);
void putback_lru_page(struct page *page);
void folio_putback_lru(struct folio *folio);
extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);

/*
 * in mm/rmap.c:
 */
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages() and then never change.
 *
 * zonelist, preferred_zone and highest_zoneidx are set first in
 * __alloc_pages() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents the highest usable zone index of
	 * the allocation request. Due to the nature of the zone,
	 * memory on a lower zone than highest_zoneidx will be
	 * protected by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone, since a higher zone than this index cannot
	 * be usable for this allocation request.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that
 * the page cannot be allocated or merged in parallel. Alternatively, it
 * must handle invalid values gracefully, and use buddy_order_unsafe()
 * below.
 */
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}
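
/*
 * Illustrative sketch (assumed caller context, not from this file): with
 * zone->lock held, a merge path can trust the order stored in
 * page_private():
 *
 *	spin_lock_irqsave(&zone->lock, flags);
 *	if (PageBuddy(buddy) && buddy_order(buddy) == order)
 *		... safe to coalesce the pair ...
 *	spin_unlock_irqrestore(&zone->lock, flags);
 */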

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set PageBuddy.
 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline bool page_is_buddy(struct page *page, struct page *buddy,
				 unsigned int order)
{
	if (!page_is_guard(buddy) && !PageBuddy(buddy))
		return false;

	if (buddy_order(buddy) != order)
		return false;

	/*
	 * zone check is done late to avoid uselessly calculating
	 * zone/node ids for pages that could never merge.
	 */
	if (page_zone_id(page) != page_zone_id(buddy))
		return false;

	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

	return true;
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}

/*
 * Find the buddy of @page and validate it.
 * @page: The input page
 * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
 *       function is used in the performance-critical __free_one_page().
 * @order: The order of the page
 * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
 *             page_to_pfn().
 *
 * The found buddy can be a non PageBuddy page, if the race happened,
 * the original PageBuddy is taken off, e.g. allocated, so we should back off.
 *
 * Return: the found buddy page or NULL if not found.
 */
static inline struct page *find_buddy_page_pfn(struct page *page,
			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
{
	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
	struct page *buddy;

	buddy = page + (__buddy_pfn - pfn);
	if (buddy_pfn)
		*buddy_pfn = __buddy_pfn;

	if (page_is_buddy(page, buddy, order))
		return buddy;
	return NULL;
}
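
/*
 * Worked example (illustrative): freeing an order-1 page at pfn 8.
 * __find_buddy_pfn(8, 1) = 8 ^ (1 << 1) = 10, so the candidate buddy is
 * the struct page two entries further on; it is returned only if
 * page_is_buddy() confirms it is free, of the same order and zone:
 *
 *	buddy = find_buddy_page_pfn(page, 8, 1, &buddy_pfn);
 *	if (buddy)
 *		... merge into an order-2 block at pfn (8 & ~2) = 8 ...
 */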

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
					unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern int user_min_free_kbytes;

extern void free_unref_page(struct page *page, unsigned int order);
extern void free_unref_page_list(struct list_head *list);

extern void zone_pcp_update(struct zone *zone, int cpu_online);
extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);

extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr,
			  int nid, bool exact_nid);

int split_free_page(struct page *free_page,
			unsigned int order, unsigned long split_pfn_offset);

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */

	/*
	 * Acts as an in/out parameter to page isolation for migration.
	 * isolate_migratepages uses it as a search base.
	 * isolate_migratepages_block will update the value to the next pfn
	 * after the last isolated one.
	 */
	unsigned long migrate_pfn;
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock contention */
	bool rescan;			/* Rescanning the same pageblock */
	bool alloc_contig;		/* alloc_contig_range allocation */
};

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one of a certain size is isolated in a migrate scanlist.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
int
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);

int __alloc_contig_migrate_range(struct compact_control *cc,
					unsigned long start, unsigned long end);
#endif
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area - automatically grows in one direction
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return (flags & VM_STACK) == VM_STACK;
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
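
/*
 * Worked example (illustrative): a typical private heap VMA has
 * vm_flags containing VM_READ|VM_WRITE. Then:
 *	is_exec_mapping():  (flags & (VM_EXEC|VM_WRITE|VM_STACK)) == VM_WRITE,
 *			    which is not VM_EXEC -> false
 *	is_stack_mapping(): VM_STACK bits clear -> false
 *	is_data_mapping():  (flags & (VM_WRITE|VM_SHARED|VM_STACK)) == VM_WRITE -> true
 * so the mapping is accounted as data in the mm counters.
 */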

/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev);
void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);
struct anon_vma *folio_anon_vma(struct folio *folio);

#ifdef CONFIG_MMU
void unmap_mapping_folio(struct folio *folio);
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked);
extern long faultin_vma_page_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end,
				   bool write, int *locked);
extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
			      unsigned long len);

/*
 * mlock_vma_page() and munlock_vma_page():
 * should be called with vma's mmap_lock held for read or write,
 * under page table lock for the pte/pmd being added or removed.
 *
 * mlock is usually called at the end of page_add_*_rmap(),
 * munlock at the end of page_remove_rmap(); but new anon
 * pages are managed by lru_cache_add_inactive_or_unevictable()
 * calling mlock_new_page().
 *
 * @compound is used to include pmd mappings of THPs, but filter out
 * pte mappings of THPs, which cannot be consistently counted: a pte
 * mapping of the THP head cannot be distinguished by the page alone.
 */
void mlock_folio(struct folio *folio);
static inline void mlock_vma_folio(struct folio *folio,
			struct vm_area_struct *vma, bool compound)
{
	/*
	 * The VM_SPECIAL check here serves two purposes.
	 * 1) VM_IO check prevents migration from double-counting during mlock.
	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
	 *    is never left set on a VM_SPECIAL vma, there is an interval while
	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
	 */
	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED) &&
	    (compound || !folio_test_large(folio)))
		mlock_folio(folio);
}
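
/*
 * Illustrative note: the intended callers are the rmap functions; a
 * page_add_*_rmap() implementation would end with something like
 *
 *	mlock_vma_folio(folio, vma, compound);
 *
 * so that a folio newly mapped into a VM_LOCKED vma is mlocked (moved
 * to the unevictable LRU) at rmap time.
 */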

static inline void mlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound)
{
	mlock_vma_folio(page_folio(page), vma, compound);
}

void munlock_page(struct page *page);
static inline void munlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound)
{
	if (unlikely(vma->vm_flags & VM_LOCKED) &&
	    (compound || !PageTransCompound(page)))
		munlock_page(page);
}
void mlock_new_page(struct page *page);
bool need_mlock_page_drain(int cpu);
void mlock_page_drain_local(void);
void mlock_page_drain_remote(int cpu);

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * Return the start of user virtual address at the specific offset within
 * a vma.
 */
static inline unsigned long
vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
		  struct vm_area_struct *vma)
{
	unsigned long address;

	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Check for address beyond vma (or wrapped through 0?) */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
		/* Test above avoids possibility of wrap to 0 on 32-bit */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}
	return address;
}
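
/*
 * Worked example (illustrative, assuming 4KiB pages): a vma covering
 * [0x10000, 0x14000) with vm_pgoff == 4 maps file pgoffs 4..7.
 *	pgoff 6, nr_pages 1: 0x10000 + ((6 - 4) << 12) = 0x12000 (inside,
 *	    returned as-is)
 *	pgoff 8: computed address 0x14000 == vm_end, so -EFAULT
 *	pgoff 3, nr_pages 2: the range straddles vm_pgoff, so vm_start
 *	    (0x10000) is returned
 */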

/*
 * Return the start of user virtual address of a page within a vma.
 * Returns -EFAULT if all of the page is outside the range of vma.
 * If page is a compound head, the entire compound page is considered.
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
	return vma_pgoff_address(page_to_pgoff(page), compound_nr(page), vma);
}

/*
 * Then at what user virtual address will none of the range be found in vma?
 * Assumes that vma_address() already returned a good starting address.
 */
static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	pgoff_t pgoff;
	unsigned long address;

	/* Common case, plus ->pgoff is invalid for KSM */
	if (pvmw->nr_pages == 1)
		return pvmw->address + PAGE_SIZE;

	pgoff = pvmw->pgoff + pvmw->nr_pages;
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	/* Check for address beyond vma (or wrapped through 0?) */
	if (address < vma->vm_start || address > vma->vm_end)
		address = vma->vm_end;
	return address;
}

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we only pin the file and drop the mmap_lock if only
	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		mmap_read_unlock(vmf->vma->vm_mm);
	}
	return fpin;
}
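
/*
 * Illustrative sketch (hypothetical ->fault handler, modelled on the
 * filemap fault pattern): drop the mmap lock before blocking on I/O,
 * then tell the caller to retry the fault:
 *
 *	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 *	... start readahead / wait for the page to become uptodate ...
 *	if (fpin) {
 *		fput(fpin);
 *		return VM_FAULT_RETRY;
 *	}
 */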
#else /* !CONFIG_MMU */
static inline void unmap_mapping_folio(struct folio *folio) { }
static inline void mlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound) { }
static inline void munlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound) { }
static inline void mlock_new_page(struct page *page) { }
static inline bool need_mlock_page_drain(int cpu) { return false; }
static inline void mlock_page_drain_local(void) { }
static inline void mlock_page_drain_remote(int cpu) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'.  Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return nth_page(base, offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'.  Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}
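
/*
 * Illustrative sketch (hypothetical loop): walking every subpage of a
 * gigantic page without assuming the mem_map is contiguous across
 * MAX_ORDER_NR_PAGES boundaries:
 *
 *	struct page *p = page;
 *
 *	for (i = 0; i < pages_per_huge_page;
 *	     i++, p = mem_map_next(p, page, i)) {
 *		... touch subpage p ...
 *	}
 */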

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg);	\
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)
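
/*
 * Illustrative usage (hypothetical message): emitted only when
 * mminit_loglevel is raised above the message's level:
 *
 *	mminit_dprintk(MMINIT_TRACE, "memmap_init",
 *		       "initialised node %d\n", nid);
 */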

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	return NUMA_NO_NODE;
}
#endif

/*
 * mm/memory-failure.c
 */
extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

#ifdef CONFIG_MEMORY_FAILURE
void clear_hwpoisoned_pages(struct page *memmap, int nr_pages);
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
        unsigned long, unsigned long,
        unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list);

/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
 * cannot assume a reduced access to memory reserves is sufficient for
 * !MMU
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	0x0
#endif
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
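
/*
 * Illustrative combination (not a definition from this file): a
 * GFP_ATOMIC caller typically ends up with something like
 *
 *	alloc_flags = ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER |
 *		      ALLOC_KSWAPD;
 *
 * i.e. check the min watermark, dig deeper into the reserves, and allow
 * kswapd to be woken; ALLOC_WMARK_MASK extracts the watermark index.
 */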

enum ttu_flags;
struct tlbflush_unmap_batch;

/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
};

/*
 * mm/vmalloc.c
 */
#ifdef CONFIG_MMU
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
                pgprot_t prot, struct page **pages, unsigned int page_shift);
#else
static inline
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
                pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	return -EINVAL;
}
#endif

void vunmap_range_noflush(unsigned long start, unsigned long end);

int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
		      unsigned long addr, int page_nid, int *flags);

void free_zone_device_page(struct page *page);

/*
 * mm/gup.c
 */
struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);

DECLARE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);

#endif	/* __MM_INTERNAL_H */