/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);

struct migration_target_control;

/*
 * Return values from address_space_operations.migratepage():
 *   - negative errno on page migration failure;
 *   - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS		0

extern const char *migrate_reason_names[MR_TYPES];

#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode);
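
/*
 * Usage note (illustrative sketch, not part of this header): filesystems
 * whose pages carry no private state can point the ->migratepage
 * address_space operation straight at migrate_page().  'demo_aops' is a
 * hypothetical name used only for this sketch.
 *
 *	static const struct address_space_operations demo_aops = {
 *		.migratepage	= migrate_page,
 *	};
 */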
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason,
		unsigned int *ret_succeeded);
extern struct page *alloc_migration_target(struct page *page, unsigned long private);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);

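/*
 * Example (illustrative sketch only): a minimal new_page_t/free_page_t pair
 * that allocates replacement pages on a caller-chosen node passed through
 * 'private'.  The names demo_alloc_dst/demo_free_dst, pagelist and nid are
 * hypothetical; most in-tree callers use alloc_migration_target() with a
 * struct migration_target_control instead.
 *
 *	static struct page *demo_alloc_dst(struct page *page, unsigned long private)
 *	{
 *		return __alloc_pages_node((int)private, GFP_HIGHUSER_MOVABLE, 0);
 *	}
 *
 *	static void demo_free_dst(struct page *page, unsigned long private)
 *	{
 *		__free_page(page);
 *	}
 *
 *	migrate_pages(&pagelist, demo_alloc_dst, demo_free_dst, nid,
 *		      MIGRATE_SYNC, MR_SYSCALL, NULL);
 */
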
extern void migrate_page_states(struct page *newpage, struct page *page);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page, int extra_count);
void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
				spinlock_t *ptl);
void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
void folio_migrate_copy(struct folio *newfolio, struct folio *folio);
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count);
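
/*
 * Example (illustrative sketch only): a ->migratepage implementation for an
 * owner whose pages do carry private state.  'demo_migratepage' is a
 * hypothetical name; the pattern is move the mapping first, transfer any
 * private state, then copy the page flags and contents.
 *
 *	static int demo_migratepage(struct address_space *mapping,
 *			struct page *newpage, struct page *page,
 *			enum migrate_mode mode)
 *	{
 *		int rc;
 *
 *		rc = migrate_page_move_mapping(mapping, newpage, page, 0);
 *		if (rc != MIGRATEPAGE_SUCCESS)
 *			return rc;
 *
 *		// transfer driver/filesystem private state here
 *
 *		migrate_page_copy(newpage, page);
 *		return MIGRATEPAGE_SUCCESS;
 *	}
 */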

extern bool numa_demotion_enabled;
extern void migrate_on_reclaim_init(void);
#ifdef CONFIG_HOTPLUG_CPU
extern void set_migration_target_nodes(void);
#else
static inline void set_migration_target_nodes(void) {}
#endif
#else

static inline void set_migration_target_nodes(void) {}

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason, unsigned int *ret_succeeded)
	{ return -ENOSYS; }
static inline struct page *alloc_migration_target(struct page *page,
		unsigned long private)
	{ return NULL; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return -EBUSY; }

static inline void migrate_page_states(struct page *newpage, struct page *page)
{
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

#define numa_demotion_enabled	false
#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_COMPACTION
extern int PageMovable(struct page *page);
extern void __SetPageMovable(struct page *page, struct address_space *mapping);
extern void __ClearPageMovable(struct page *page);
#else
static inline int PageMovable(struct page *page) { return 0; }
static inline void __SetPageMovable(struct page *page,
				struct address_space *mapping)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif
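
/*
 * Usage note (illustrative, simplified): a driver that wants compaction to
 * migrate pages it owns registers them under the page lock.  'demo_mapping'
 * is a hypothetical address_space whose a_ops provide the driver's
 * isolate_page/migratepage/putback_page callbacks.
 *
 *	lock_page(page);
 *	__SetPageMovable(page, &demo_mapping);
 *	unlock_page(page);
 */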

#ifdef CONFIG_NUMA_BALANCING
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
#else
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN;
}
#endif

#ifdef CONFIG_MIGRATION

/*
 * Entries in the migrate_vma src/dst arrays below encode a pfn shifted left
 * by MIGRATE_PFN_SHIFT, with the low bits used for the flags defined here.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_SHIFT	6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
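
/*
 * Example (illustrative): decoding a source entry produced by
 * migrate_vma_setup() and encoding the destination page chosen for it.
 * 'src', 'dst', 'i' and 'dpage' are hypothetical driver-side variables.
 *
 *	if (src[i] & MIGRATE_PFN_MIGRATE) {
 *		struct page *spage = migrate_pfn_to_page(src[i]);
 *
 *		// copy spage's data into the newly allocated dpage, then:
 *		dst[i] = migrate_pfn(page_to_pfn(dpage));
 *	}
 */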

enum migrate_vma_direction {
	MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
	MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
};

struct migrate_vma {
	struct vm_area_struct	*vma;
	/*
	 * The src and dst arrays must each provide one entry per page in
	 * the [start, end) range, i.e. (end - start) >> PAGE_SHIFT entries.
	 */
	unsigned long		*dst;
	unsigned long		*src;
	unsigned long		cpages;	/* pages collected for migration */
	unsigned long		npages;	/* total pages in the range */
	unsigned long		start;
	unsigned long		end;

	/*
	 * Owner identity to match against page->pgmap->owner when migrating
	 * out of device private memory (together with the
	 * MIGRATE_VMA_SELECT_DEVICE_PRIVATE flag below).
	 */
	void			*pgmap_owner;
	unsigned long		flags;
};

int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
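
/*
 * Example (illustrative sketch of the intended calling sequence; the driver
 * variables are hypothetical): migrating part of a VMA in three phases from
 * a device driver.
 *
 *	struct migrate_vma args = {
 *		.vma		= vma,
 *		.start		= start,
 *		.end		= end,
 *		.src		= src_pfns,
 *		.dst		= dst_pfns,
 *		.pgmap_owner	= demo_drvdata,
 *		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
 *	};
 *
 *	if (migrate_vma_setup(&args))
 *		return -EINVAL;
 *	// allocate destination pages, copy data, and fill args.dst[] for
 *	// every source entry that has MIGRATE_PFN_MIGRATE set
 *	migrate_vma_pages(&args);
 *	migrate_vma_finalize(&args);
 */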
int next_demotion_node(int node);

#else

static inline int next_demotion_node(int node)
{
	return NUMA_NO_NODE;
}

#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */