#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/page_ext.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>
#include <linux/page_owner.h>
#include <linux/page_idle.h>

/*
 * struct page extension
 *
 * Manages extra, per-page data (struct page_ext) without enlarging
 * struct page itself. Adding fields to struct page would bloat one of the
 * most size-sensitive structures in the kernel and would require a rebuild
 * for every debugging feature that wants per-page state.
 *
 * Instead, debugging clients such as page_owner and page_idle register a
 * struct page_ext_operations entry in page_ext_ops[] below. At boot,
 * invoke_need_callbacks() asks each client whether it is enabled; enabled
 * clients are handed an offset into the per-page record, and the record
 * size (page_ext_size) grows accordingly. The arrays are allocated only
 * if at least one client needs them.
 *
 * Depending on the memory model, the arrays hang off each pglist_data
 * (FLATMEM) or each mem_section (SPARSEMEM); lookup_page_ext() maps a
 * struct page to its extension record in either case.
 */

static struct page_ext_operations *page_ext_ops[] = {
#ifdef CONFIG_PAGE_OWNER
	&page_owner_ops,
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
	&page_idle_ops,
#endif
};

unsigned long page_ext_size = sizeof(struct page_ext);

static unsigned long total_usage;

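/*
 * Ask every registered client whether it wants page_ext space. Each client
 * that does is assigned the current record offset, and page_ext_size grows
 * by the client's declared size. Returns true if anyone needs the arrays.
 */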
static bool __init invoke_need_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);
	bool need = false;

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
			page_ext_ops[i]->offset = page_ext_size;
			page_ext_size += page_ext_ops[i]->size;
			need = true;
		}
	}

	return need;
}

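/* Run every registered client's ->init() hook once the arrays are in place. */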
static void __init invoke_init_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->init)
			page_ext_ops[i]->init();
	}
}

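/* Records are page_ext_size bytes apart; index is a record count, not bytes. */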
static inline struct page_ext *get_entry(void *base, unsigned long index)
{
	return base + page_ext_size * index;
}

#if !defined(CONFIG_SPARSEMEM)

/* FLATMEM: one contiguous page_ext array per node, hung off the pglist_data. */
void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
	pgdat->node_page_ext = NULL;
}

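/*
 * Map a page to its extension record: index the node's array by the pfn's
 * offset from the MAX_ORDER-aligned node start (matching the alignment
 * slack allocated in alloc_node_page_ext()).
 */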
struct page_ext *lookup_page_ext(const struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long index;
	struct page_ext *base;

	base = NODE_DATA(page_to_nid(page))->node_page_ext;
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (unlikely(!base))
		return NULL;
	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
					MAX_ORDER_NR_PAGES);
	return get_entry(base, index);
}

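/* Boot-time memblock allocation of a whole node's page_ext array. */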
static int __init alloc_node_page_ext(int nid)
{
	struct page_ext *base;
	unsigned long table_size;
	unsigned long nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	/*
	 * Need extra space if the node range is not aligned with
	 * MAX_ORDER_NR_PAGES: when the page allocator's buddy algorithm
	 * checks a buddy's status, the range can reach outside the exact
	 * node range.
	 */
	if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
		!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
		nr_pages += MAX_ORDER_NR_PAGES;

	table_size = page_ext_size * nr_pages;

	base = memblock_alloc_try_nid(
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
			MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_ext = base;
	total_usage += table_size;
	return 0;
}

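/* FLATMEM boot entry point: allocate every online node's array, or panic. */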
void __init page_ext_init_flatmem(void)
{
	int nid, fail;

	if (!invoke_need_callbacks())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_ext(nid);
		if (fail)
			goto fail;
	}
	pr_info("allocated %lu bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

fail:
	pr_crit("allocation of page_ext failed.\n");
	panic("Out of memory");
}

#else

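/* SPARSEMEM: page_ext hangs off each mem_section instead of the node. */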
struct page_ext *lookup_page_ext(const struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);

	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (!section->page_ext)
		return NULL;
	return get_entry(section->page_ext, pfn);
}

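/*
 * Try node-local contiguous pages first; __GFP_NOWARN because falling back
 * to vzalloc_node() on fragmented systems is expected and fine.
 */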
static void *__meminit alloc_page_ext(size_t size, int nid)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr) {
		kmemleak_alloc(addr, size, 1, flags);
		return addr;
	}

	addr = vzalloc_node(size, nid);

	return addr;
}

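/*
 * Allocate the page_ext array for the section containing @pfn and store it
 * pre-biased by the section's start pfn, so lookup_page_ext() can index
 * with a raw pfn.
 */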
static int __meminit init_section_page_ext(unsigned long pfn, int nid)
{
	struct mem_section *section;
	struct page_ext *base;
	unsigned long table_size;

	section = __pfn_to_section(pfn);

	if (section->page_ext)
		return 0;

	table_size = page_ext_size * PAGES_PER_SECTION;
	base = alloc_page_ext(table_size, nid);

	/*
	 * The value stored in section->page_ext is (base - pfn)
	 * and it does not point to the memory block allocated above,
	 * causing kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		pr_err("page ext allocation failure\n");
		return -ENOMEM;
	}

	/*
	 * The passed "pfn" may not be aligned to SECTION. For the
	 * calculation we need to apply a mask.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_ext = (void *)base - page_ext_size * pfn;
	total_usage += table_size;
	return 0;
}
#ifdef CONFIG_MEMORY_HOTPLUG
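/* Free either flavor alloc_page_ext() can return: vmalloc or exact pages. */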
static void free_page_ext(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		struct page *page = virt_to_page(addr);
		size_t table_size;

		table_size = page_ext_size * PAGES_PER_SECTION;

		BUG_ON(PageReserved(page));
		kmemleak_free(addr);
		free_pages_exact(addr, table_size);
	}
}

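/*
 * ms->page_ext is stored pre-biased by the section start pfn, so get_entry()
 * with the (section-aligned) pfn recovers the real allocation base to free.
 */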
static void __free_page_ext(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_ext *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_ext)
		return;
	base = get_entry(ms->page_ext, pfn);
	free_page_ext(base);
	ms->page_ext = NULL;
}

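/*
 * Hotplug: populate page_ext for every section touched by the onlined range,
 * rolling all of them back if any allocation fails.
 */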
static int __meminit online_page_ext(unsigned long start_pfn,
				unsigned long nr_pages,
				int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == NUMA_NO_NODE) {
		/*
		 * The range being onlined belongs to a node that is already
		 * online, so derive the node id from the first pfn; that pfn
		 * must be valid for the hotplug request itself to be valid.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_state(nid, N_ONLINE));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION)
		fail = init_section_page_ext(pfn, nid);
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);

	return -ENOMEM;
}

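/* Hotplug teardown: free page_ext for every section in the offlined range. */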
static int __meminit offline_page_ext(unsigned long start_pfn,
				unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);
	return 0;
}

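/* Memory hotplug notifier: keep section page_ext in sync with online state. */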
static int __meminit page_ext_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_ext(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}

#endif	/* CONFIG_MEMORY_HOTPLUG */

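/*
 * SPARSEMEM boot entry point: walk every memory node section by section,
 * allocate its page_ext array, then register for hotplug updates.
 */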
void __init page_ext_init(void)
{
	unsigned long pfn;
	int nid;

	if (!invoke_need_callbacks())
		return;

	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be section-aligned, and
		 * page->flags of out-of-node pages are not initialized, so
		 * step through [start_pfn, end_pfn) one section at a time.
		 */
		for (pfn = start_pfn; pfn < end_pfn;
			pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfns can overlap: some architectures lay
			 * out memory as, e.g.,
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2 | ...
			 * so skip sections whose first pfn belongs elsewhere.
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_ext(pfn, nid))
				goto oom;
			cond_resched();
		}
	}
	hotplug_memory_notifier(page_ext_callback, 0);
	pr_info("allocated %lu bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

oom:
	panic("Out of memory");
}

/* SPARSEMEM keeps page_ext in mem_sections; nothing per-node to set up. */
void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
}

#endif	/* !CONFIG_SPARSEMEM */