1
2#ifndef _LINUX_MIGRATE_H
3#define _LINUX_MIGRATE_H
4
5#include <linux/mm.h>
6#include <linux/mempolicy.h>
7#include <linux/migrate_mode.h>
8#include <linux/hugetlb.h>
9
/*
 * Callback types used by migrate_pages(): allocate a destination page for
 * @page, and release an unused destination page, respectively.  @private
 * is an opaque value passed through unchanged from the caller.
 */
typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);

/*
 * Return value of a successful page migration.  Failure paths return a
 * negative errno instead (see e.g. the !CONFIG_MIGRATION stubs below).
 */
#define MIGRATEPAGE_SUCCESS 0

/*
 * Why a page is being migrated; passed as @reason to migrate_pages() and
 * usable as an index into migrate_reason_names[] for reporting.
 */
enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CONTIG_RANGE,
	MR_TYPES		/* keep last: number of reasons above */
};

/* Human-readable names for enum migrate_reason, indexed by the enum value. */
extern const char *migrate_reason_names[MR_TYPES];
33
34static inline struct page *new_page_nodemask(struct page *page,
35 int preferred_nid, nodemask_t *nodemask)
36{
37 gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
38 unsigned int order = 0;
39 struct page *new_page = NULL;
40
41 if (PageHuge(page))
42 return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
43 preferred_nid, nodemask);
44
45 if (PageTransHuge(page)) {
46 gfp_mask |= GFP_TRANSHUGE;
47 order = HPAGE_PMD_ORDER;
48 }
49
50 if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
51 gfp_mask |= __GFP_HIGHMEM;
52
53 new_page = __alloc_pages_nodemask(gfp_mask, order,
54 preferred_nid, nodemask);
55
56 if (new_page && PageTransHuge(new_page))
57 prep_transhuge_page(new_page);
58
59 return new_page;
60}
61
#ifdef CONFIG_MIGRATION

/* Return pages that were isolated for migration to the lists they came from. */
extern void putback_movable_pages(struct list_head *l);
/* Common ->migratepage() implementation: move the mapping and copy state. */
extern int migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode);
/*
 * Migrate the pages on list @l: targets are allocated with @new and unused
 * targets released with @free; @private is passed through to both.
 * @reason is an enum migrate_reason value.
 */
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason);
/* Isolate/putback for non-LRU movable pages (see PageMovable() below). */
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
extern void putback_movable_page(struct page *page);

/* Preparation hooks called before starting a migration pass. */
extern int migrate_prep(void);
extern int migrate_prep_local(void);
/* Transfer page flags/state (and, with _copy, the data) to @newpage. */
extern void migrate_page_states(struct page *newpage, struct page *page);
extern void migrate_page_copy(struct page *newpage, struct page *page);
/* Replace @page with @newpage in @mapping's radix tree. */
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode,
		int extra_count);
#else
/*
 * CONFIG_MIGRATION=n stubs: nothing can be migrated, so putback/copy are
 * no-ops and the operations fail with -ENOSYS (or -EBUSY for isolation).
 */
static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason)
	{ return -ENOSYS; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return -EBUSY; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline void migrate_page_states(struct page *newpage, struct page *page)
{
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

#endif
109
#ifdef CONFIG_COMPACTION
/*
 * Non-LRU movable page support: a driver tags its pages with
 * __SetPageMovable() (recording @mapping) so compaction can migrate them;
 * __ClearPageMovable() removes the tag.  PageMovable() tests the tag.
 */
extern int PageMovable(struct page *page);
extern void __SetPageMovable(struct page *page, struct address_space *mapping);
extern void __ClearPageMovable(struct page *page);
#else
/* Without compaction no page is considered movable and the setters are no-ops. */
static inline int PageMovable(struct page *page) { return 0; }
static inline void __SetPageMovable(struct page *page,
				struct address_space *mapping)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif
124
#ifdef CONFIG_NUMA_BALANCING
/* Is a THP migration currently in flight for this pmd? */
extern bool pmd_trans_migrating(pmd_t pmd);
/* Move a NUMA-misplaced page toward @node; see MR_NUMA_MISPLACED. */
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
#else
/*
 * CONFIG_NUMA_BALANCING=n stubs: no migration is ever in flight, and
 * misplaced-page migration reports -EAGAIN.
 */
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN;
}
#endif
140
#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/* THP variant of migrate_misplaced_page(): move the huge page mapped by
 * @pmd (with entry value @entry) at @address toward @node. */
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
/* Stub when NUMA balancing of THPs is not built in: always -EAGAIN. */
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif
157
158
159#ifdef CONFIG_MIGRATION
160
161
162
163
164
165
/*
 * migrate_vma() src/dst array entries encode a pfn together with status
 * flags: the flags live in the low MIGRATE_PFN_SHIFT bits and the pfn is
 * stored shifted left by MIGRATE_PFN_SHIFT (see the two helpers below).
 */
#define MIGRATE_PFN_VALID (1UL << 0)
#define MIGRATE_PFN_MIGRATE (1UL << 1)
#define MIGRATE_PFN_LOCKED (1UL << 2)
#define MIGRATE_PFN_WRITE (1UL << 3)
#define MIGRATE_PFN_DEVICE (1UL << 4)
#define MIGRATE_PFN_ERROR (1UL << 5)
#define MIGRATE_PFN_SHIFT 6

/* Decode a migrate_vma entry to its struct page, or NULL if the entry
 * does not carry a valid pfn. */
static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

/* Encode @pfn as a migrate_vma entry marked valid (no other flags set). */
static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
/*
 * Caller-supplied callbacks driven by migrate_vma().  @src and @dst are the
 * per-page entry arrays in the MIGRATE_PFN_* encoding above, covering
 * [@start, @end) of @vma; @private is the opaque pointer the caller passed
 * to migrate_vma().
 *
 * alloc_and_copy: presumably allocates destination pages and copies source
 * contents, filling @dst — confirm against migrate_vma() users.
 * finalize_and_map: invoked after migration to let the caller finish
 * mapping/cleanup; both entry arrays are read-only at this point.
 */
struct migrate_vma_ops {
	void (*alloc_and_copy)(struct vm_area_struct *vma,
			       const unsigned long *src,
			       unsigned long *dst,
			       unsigned long start,
			       unsigned long end,
			       void *private);
	void (*finalize_and_map)(struct vm_area_struct *vma,
				 const unsigned long *src,
				 const unsigned long *dst,
				 unsigned long start,
				 unsigned long end,
				 void *private);
};
266
#if defined(CONFIG_MIGRATE_VMA_HELPER)
/*
 * Migrate the range [@start, @end) of @vma, invoking the @ops callbacks.
 * @src and @dst are caller-provided arrays, one MIGRATE_PFN_* encoded
 * entry per page in the range; @private is passed through to @ops.
 */
int migrate_vma(const struct migrate_vma_ops *ops,
		struct vm_area_struct *vma,
		unsigned long start,
		unsigned long end,
		unsigned long *src,
		unsigned long *dst,
		void *private);
#else
/* Helper not built in: report the request as unsupported. */
static inline int migrate_vma(const struct migrate_vma_ops *ops,
			      struct vm_area_struct *vma,
			      unsigned long start,
			      unsigned long end,
			      unsigned long *src,
			      unsigned long *dst,
			      void *private)
{
	return -EINVAL;
}
#endif
287
288#endif
289
290#endif
291