1
2
3
4
5
6#include <linux/mm.h>
7#include <linux/page-isolation.h>
8#include <linux/pageblock-flags.h>
9#include <linux/memory.h>
10#include <linux/hugetlb.h>
11#include <linux/page_owner.h>
12#include <linux/migrate.h>
13#include "internal.h"
14
15#define CREATE_TRACE_POINTS
16#include <trace/events/page_isolation.h>
17
/*
 * Mark the pageblock containing @page as MIGRATE_ISOLATE and move its free
 * pages to the isolate freelist so they can no longer be allocated.
 *
 * @page:			any page inside the pageblock to isolate
 * @migratetype:		migratetype the caller expects movable pages in
 *				the block to have (passed to
 *				has_unmovable_pages())
 * @skip_hwpoisoned_pages:	treat HWPoison pages as isolatable
 *
 * Returns 0 on success, -EBUSY if the block cannot be isolated (already
 * isolated, vetoed by a memory-isolation notifier, or it contains unmovable
 * pages).  Takes zone->lock internally; on success also drains per-cpu page
 * lists so stray free pages land on the isolate freelist.
 */
static int set_migratetype_isolate(struct page *page, int migratetype,
				bool skip_hwpoisoned_pages)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	/*
	 * We assume the caller intended to SET the migratetype to isolate.
	 * If it is already set, someone else must have raced and set it
	 * before us: bail out with -EBUSY.
	 */
	if (is_migrate_isolate_page(page))
		goto out;

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * Ask the memory-isolation notifier chain whether this block may be
	 * isolated.  A notifier can veto the isolation outright (non-zero
	 * errno), or report pages it holds via arg.pages_found so that
	 * has_unmovable_pages() below does not count them as unmovable.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;

	/*
	 * Isolation succeeds only if the block contains no unmovable pages
	 * beyond the arg.pages_found accounted for by the notifier.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
				 skip_hwpoisoned_pages))
		ret = 0;

	/*
	 * On success: flip the pageblock's migratetype, bump the zone's
	 * isolated-block count, and move the block's free pages onto the
	 * isolate freelist, adjusting the freepage accounting for the old
	 * migratetype.  All of this happens under zone->lock.
	 */
out:
	if (!ret) {
		unsigned long nr_pages;
		int mt = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
								NULL);

		__mod_zone_freepage_state(zone, -nr_pages, mt);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		/*
		 * Flush per-cpu page lists so cached free pages from this
		 * block are returned to the (now isolated) freelists instead
		 * of being handed out later.
		 */
		drain_all_pages(zone);
	return ret;
}
90
/*
 * Revert the pageblock containing @page from MIGRATE_ISOLATE back to
 * @migratetype, returning its free pages to the normal freelists.
 * No-op if the block is not currently isolated.  Takes zone->lock
 * internally.
 */
static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because a free page with more than pageblock_order on an isolated
	 * pageblock is restricted from merging due to the freepage-counting
	 * problem, it is possible that there is a free buddy page.
	 * move_freepages_block() does not handle merging, so we need another
	 * approach to merge them: isolating the page here and freeing it
	 * after the lock is dropped lets the allocator merge it normally.
	 */
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
			pfn = page_to_pfn(page);
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);

			if (pfn_valid_within(buddy_pfn) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				isolated_page = true;
			}
		}
	}

	/*
	 * If we isolated a free page of more than pageblock_order above,
	 * there can be no other free page in the block, so the costly
	 * freelist scan in move_freepages_block() can be skipped.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	/* Free the page captured above outside of zone->lock so it can merge. */
	if (isolated_page) {
		post_alloc_hook(page, order, __GFP_MOVABLE);
		__free_pages(page, order);
	}
}
146
147static inline struct page *
148__first_valid_page(unsigned long pfn, unsigned long nr_pages)
149{
150 int i;
151
152 for (i = 0; i < nr_pages; i++) {
153 struct page *page;
154
155 if (!pfn_valid_within(pfn + i))
156 continue;
157 page = pfn_to_online_page(pfn + i);
158 if (!page)
159 continue;
160 return page;
161 }
162 return NULL;
163}
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
188 unsigned migratetype, bool skip_hwpoisoned_pages)
189{
190 unsigned long pfn;
191 unsigned long undo_pfn;
192 struct page *page;
193
194 BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
195 BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));
196
197 for (pfn = start_pfn;
198 pfn < end_pfn;
199 pfn += pageblock_nr_pages) {
200 page = __first_valid_page(pfn, pageblock_nr_pages);
201 if (page &&
202 set_migratetype_isolate(page, migratetype, skip_hwpoisoned_pages)) {
203 undo_pfn = pfn;
204 goto undo;
205 }
206 }
207 return 0;
208undo:
209 for (pfn = start_pfn;
210 pfn < undo_pfn;
211 pfn += pageblock_nr_pages) {
212 struct page *page = pfn_to_online_page(pfn);
213 if (!page)
214 continue;
215 unset_migratetype_isolate(page, migratetype);
216 }
217
218 return -EBUSY;
219}
220
221
222
223
224int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
225 unsigned migratetype)
226{
227 unsigned long pfn;
228 struct page *page;
229
230 BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
231 BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));
232
233 for (pfn = start_pfn;
234 pfn < end_pfn;
235 pfn += pageblock_nr_pages) {
236 page = __first_valid_page(pfn, pageblock_nr_pages);
237 if (!page || !is_migrate_isolate_page(page))
238 continue;
239 unset_migratetype_isolate(page, migratetype);
240 }
241 return 0;
242}
243
/*
 * Test whether all pages in [pfn, end_pfn) are free (i.e. isolated).
 * All pages in the range must be in the same zone, and the caller must
 * hold zone->lock.
 *
 * Returns the last tested pfn: == end_pfn means the whole range is
 * isolated; < end_pfn points at the first page that is neither free nor
 * skippable.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  bool skip_hwpoisoned_pages)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 * Skip the whole buddy chunk at once.
			 */
			pfn += 1 << page_order(page);
		else if (skip_hwpoisoned_pages && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else
			break;
	}

	return pfn;
}
278
/*
 * Check whether every page in [start_pfn, end_pfn) is isolated.
 * Caller should ensure that the requested range is in a single zone.
 *
 * Returns 0 if the whole range is isolated, -EBUSY otherwise.
 */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;

	/*
	 * First pass (lockless): walk the range pageblock by pageblock and
	 * bail out if any block's migratetype is not MIGRATE_ISOLATE.  Free
	 * chunks are not necessarily aligned to pageblock_nr_pages, so the
	 * per-page freelist check is done separately below under zone->lock.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	/* Need at least one valid page in the range to locate the zone. */
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;

	/* Second pass: verify every page is actually free, under zone->lock. */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	spin_unlock_irqrestore(&zone->lock, flags);

	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;
}
311
312struct page *alloc_migrate_target(struct page *page, unsigned long private)
313{
314 return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
315}
316