1
2#ifndef _LINUX_MIGRATE_H
3#define _LINUX_MIGRATE_H
4
5#include <linux/mm.h>
6#include <linux/mempolicy.h>
7#include <linux/migrate_mode.h>
8#include <linux/hugetlb.h>
9
/*
 * Allocation/release callbacks passed to migrate_pages(); @private is the
 * opaque value the migrate_pages() caller supplied alongside them.
 */
typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);
12
13
14
15
16
17
18#define MIGRATEPAGE_SUCCESS 0
19
/*
 * Why pages are being migrated.  Values index migrate_reason_names[]
 * (declared below), so the two must be kept in sync.
 */
enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CONTIG_RANGE,
	MR_TYPES		/* number of reasons -- must remain last */
};
30
31
32extern char *migrate_reason_names[MR_TYPES];
33
34static inline struct page *new_page_nodemask(struct page *page,
35 int preferred_nid, nodemask_t *nodemask)
36{
37 gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
38 unsigned int order = 0;
39 struct page *new_page = NULL;
40
41 if (PageHuge(page))
42 return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
43 preferred_nid, nodemask);
44
45 if (PageTransHuge(page)) {
46 gfp_mask |= GFP_TRANSHUGE;
47 order = HPAGE_PMD_ORDER;
48 }
49
50 if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
51 gfp_mask |= __GFP_HIGHMEM;
52
53 new_page = __alloc_pages_nodemask(gfp_mask, order,
54 preferred_nid, nodemask);
55
56 if (new_page && PageTransHuge(new_page))
57 prep_transhuge_page(new_page);
58
59 return new_page;
60}
61
62#ifdef CONFIG_MIGRATION
63
/*
 * Page migration API, available when CONFIG_MIGRATION=y.  The #else
 * branch below supplies no-op/failing stubs so callers do not need
 * ifdefs of their own.
 */
extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
extern void putback_movable_page(struct page *page);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern void migrate_page_states(struct page *newpage, struct page *page);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode,
		int extra_count);
83#else
84
85static inline void putback_movable_pages(struct list_head *l) {}
86static inline int migrate_pages(struct list_head *l, new_page_t new,
87 free_page_t free, unsigned long private, enum migrate_mode mode,
88 int reason)
89 { return -ENOSYS; }
90static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
91 { return -EBUSY; }
92
/* Migration preparation is meaningless without CONFIG_MIGRATION. */
static inline int migrate_prep(void)
{
	return -ENOSYS;
}

static inline int migrate_prep_local(void)
{
	return -ENOSYS;
}
95
/* Copying page state/data and moving mappings: no-ops or hard failure. */
static inline void migrate_page_states(struct page *newpage, struct page *page) { }

static inline void migrate_page_copy(struct page *newpage, struct page *page) { }

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	return -ENOSYS;
}
108
109#endif
110
111#ifdef CONFIG_COMPACTION
112extern int PageMovable(struct page *page);
113extern void __SetPageMovable(struct page *page, struct address_space *mapping);
114extern void __ClearPageMovable(struct page *page);
115#else
/*
 * CONFIG_COMPACTION=n: no page is considered movable and the movable
 * flag helpers are no-ops.
 *
 * Note: the stray ';' after the PageMovable() stub was dropped -- an
 * empty declaration at file scope is not valid strict ISO C (pre-C23)
 * and triggers -Wpedantic warnings.
 */
static inline int PageMovable(struct page *page) { return 0; }
static inline void __SetPageMovable(struct page *page,
				struct address_space *mapping)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
124#endif
125
126#ifdef CONFIG_NUMA_BALANCING
127extern bool pmd_trans_migrating(pmd_t pmd);
128extern int migrate_misplaced_page(struct page *page,
129 struct vm_area_struct *vma, int node);
130#else
131static inline bool pmd_trans_migrating(pmd_t pmd)
132{
133 return false;
134}
/* NUMA balancing disabled: report transient failure to the caller. */
static inline int migrate_misplaced_page(struct page *page,
		struct vm_area_struct *vma, int node)
{
	return -EAGAIN;
}
140#endif
141
142#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
143extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
144 struct vm_area_struct *vma,
145 pmd_t *pmd, pmd_t entry,
146 unsigned long address,
147 struct page *page, int node);
148#else
149static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
150 struct vm_area_struct *vma,
151 pmd_t *pmd, pmd_t entry,
152 unsigned long address,
153 struct page *page, int node)
154{
155 return -EAGAIN;
156}
157#endif
158
159
160#ifdef CONFIG_MIGRATION
161
162
163
164
165
166
/*
 * Encoding used by the migrate_vma() src/dst arrays: the page frame
 * number is stored in the bits above MIGRATE_PFN_SHIFT, per-entry state
 * flags occupy the low MIGRATE_PFN_SHIFT bits.
 */
#define MIGRATE_PFN_VALID (1UL << 0)
#define MIGRATE_PFN_MIGRATE (1UL << 1)
#define MIGRATE_PFN_LOCKED (1UL << 2)
#define MIGRATE_PFN_WRITE (1UL << 3)
#define MIGRATE_PFN_DEVICE (1UL << 4)
#define MIGRATE_PFN_ERROR (1UL << 5)
#define MIGRATE_PFN_SHIFT 6
174
175static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
176{
177 if (!(mpfn & MIGRATE_PFN_VALID))
178 return NULL;
179 return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
180}
181
182static inline unsigned long migrate_pfn(unsigned long pfn)
183{
184 return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
185}
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
/*
 * struct migrate_vma_ops - caller-supplied hooks driving migrate_vma()
 *
 * @alloc_and_copy:	given the source entries in @src for the virtual
 *			range [start, end) of @vma, allocate destination
 *			pages and fill in the corresponding @dst entries;
 *			@private is the cookie passed to migrate_vma().
 * @finalize_and_map:	called with both arrays read-only so the caller
 *			can complete or roll back its own bookkeeping --
 *			presumably per successfully migrated entry; see
 *			the migrate_vma() implementation for the exact
 *			contract.
 */
struct migrate_vma_ops {
	void (*alloc_and_copy)(struct vm_area_struct *vma,
			       const unsigned long *src,
			       unsigned long *dst,
			       unsigned long start,
			       unsigned long end,
			       void *private);
	void (*finalize_and_map)(struct vm_area_struct *vma,
				 const unsigned long *src,
				 const unsigned long *dst,
				 unsigned long start,
				 unsigned long end,
				 void *private);
};
267
268#if defined(CONFIG_MIGRATE_VMA_HELPER)
269int migrate_vma(const struct migrate_vma_ops *ops,
270 struct vm_area_struct *vma,
271 unsigned long start,
272 unsigned long end,
273 unsigned long *src,
274 unsigned long *dst,
275 void *private);
276#else
/* No CONFIG_MIGRATE_VMA_HELPER: the request cannot be serviced. */
static inline int migrate_vma(const struct migrate_vma_ops *ops,
		struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		unsigned long *src, unsigned long *dst,
		void *private)
{
	return -EINVAL;
}
287#endif
288
289#endif
290
291#endif
292