/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	/* Use the slab allocator once it is up; fall back to memblock. */
	if (slab_is_available()) {
		if (node_state(nid, N_HIGH_MEMORY))
			section = kzalloc_node(array_size, GFP_KERNEL, nid);
		else
			section = kzalloc(array_size, GFP_KERNEL);
	} else {
		section = memblock_virt_alloc_node(array_size, nid);
	}

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;
	int ret = 0;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return ret;
}
#else /* !CONFIG_SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	VM_BUG_ON(root_nr == NR_SECTION_ROOTS);

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
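
/*
 * Worked example (illustrative; the constants are config-dependent):
 * with SECTIONS_PER_ROOT == 256, the mem_section for section 1000
 * lives in root 3 (1000 / 256) at offset 232 (1000 % 256), so
 * __section_nr() recovers 3 * 256 + 232 == 1000.
 */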

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps track of nid while the section is not yet
 * fully initialized, and also eliminates an extra data structure
 * that would be needed to keep track of it.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}

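/*
 * The encoded nid shares storage with the eventual mem_map pointer:
 * sparse_init_one_section() clears the SECTION_MAP_MASK bits, so
 * sparse_early_nid() is only meaningful before the section's mem_map
 * has been installed.
 */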

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}
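
/*
 * Illustrative caller (a sketch, not part of this file): architectures
 * typically register their memory early in boot, e.g.
 *
 *	for_each_memblock(memory, reg)
 *		memory_present(memblock_get_region_node(reg),
 *			       memblock_region_memory_base_pfn(reg),
 *			       memblock_region_memory_end_pfn(reg));
 */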

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						  unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}

/*
 * Subtle: we encode a section's mem_map relative to the section's
 * first pfn, so that adding any pfn in the section to the coded value
 * yields that pfn's struct page. This keeps pfn_to_page() simple
 * pointer arithmetic even though the mem_maps themselves are
 * discontiguous.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}
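
/*
 * Worked example (illustrative): if section pnum starts at pfn P and
 * its struct pages live at mem_map M, the coded value is M - P.
 * sparse_decode_mem_map() masks off the flag bits and adds P back, so
 * coded_mem_map + pfn is exactly pfn_to_page(pfn) for any pfn in the
 * section.
 */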

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}

static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}

unsigned long usemap_size(void)
{
	unsigned long size_bytes;
	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
	size_bytes = roundup(size_bytes, sizeof(unsigned long));
	return size_bytes;
}
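
/*
 * Worked example (illustrative; SECTION_BLOCKFLAGS_BITS is
 * config-dependent): with SECTION_BLOCKFLAGS_BITS == 256 (64
 * pageblocks of 4 flag bits each), 256 rounded up to a byte multiple
 * is 256 / 8 == 32 bytes, already a multiple of sizeof(unsigned long)
 * on 64-bit, so usemap_size() returns 32.
 */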

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	unsigned long goal, limit;
	unsigned long *p;
	int nid;
	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This relocates the usemap into
	 * the same section as the pgdat wherever possible to avoid this
	 * problem.
	 */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	p = memblock_virt_alloc_try_nid_nopanic(size,
						SMP_CACHE_BYTES, goal, limit,
						nid);
	if (!p && limit) {
		limit = 0;
		goto again;
	}
	return p;
}
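
/*
 * Worked example (illustrative; section size is arch-dependent): with
 * 128MB sections (PA_SECTION_SHIFT == 27) and a pgdat at physical
 * address 0x12345678, goal is 0x10000000 and limit is 0x18000000,
 * i.e. the allocation is first attempted inside the pgdat's own
 * section, and only then anywhere on the node (limit == 0).
 */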

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
	static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		printk(KERN_INFO
		       "node %d must be removed before remove section %ld\n",
		       nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable sections because they will just
	 * gather other removable sections for dynamic partitioning.
	 * Just notify the un-removable section's number here.
	 */
	printk(KERN_INFO "Section %ld and %ld (node %d)", usemap_snr,
	       pgdat_snr, nid);
	printk(KERN_CONT
	       " have a circular dependency on usemap and pgdat allocations\n");
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
343
344static void __init sparse_early_usemaps_alloc_node(unsigned long**usemap_map,
345 unsigned long pnum_begin,
346 unsigned long pnum_end,
347 unsigned long usemap_count, int nodeid)
348{
349 void *usemap;
350 unsigned long pnum;
351 int size = usemap_size();
352
353 usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
354 size * usemap_count);
355 if (!usemap) {
356 printk(KERN_WARNING "%s: allocation failed\n", __func__);
357 return;
358 }
359
360 for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
361 if (!present_section_nr(pnum))
362 continue;
363 usemap_map[pnum] = usemap;
364 usemap += size;
365 check_usemap_section_nr(nodeid, usemap_map[pnum]);
366 }
367}
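
/*
 * Note: all usemaps for one node are carved from a single contiguous
 * allocation above, so they share pages; check_usemap_section_nr()
 * then warns when that allocation and the node's pgdat end up in
 * different sections.
 */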

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
		struct vmem_altmap *altmap)
{
	struct page *map;
	unsigned long size;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
	map = memblock_virt_alloc_try_nid(size,
					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
					  BOOTMEM_ALLOC_ACCESSIBLE, nid);
	return map;
}

void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	void *map;
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

	map = alloc_remap(nodeid, size * map_count);
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	size = PAGE_ALIGN(size);
	map = memblock_virt_alloc_try_nid(size * map_count,
					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
					  BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	/* fallback */
	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid, NULL);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		printk(KERN_ERR "%s: sparsemem memory map backing failed, "
			"some memory will not be available\n", __func__);
		ms->section_mem_map = 0;
	}
}
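
/*
 * The populate path above tries three tiers in order: an arch remap
 * area (alloc_remap), one large memblock allocation covering every
 * present section of the node, and finally a per-section allocation;
 * only sections whose last-resort allocation also fails lose their
 * mem_map.
 */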
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long map_count, int nodeid)
{
	sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
				      map_count, nodeid);
}
#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid, NULL);
	if (map)
		return map;

	printk(KERN_ERR "%s: sparsemem memory map backing failed, "
		"some memory will not be available\n", __func__);
	ms->section_mem_map = 0;
	return NULL;
}
#endif

void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;
	int nodeid_begin = 0;
	unsigned long pnum_begin = 0;
	unsigned long usemap_count;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	unsigned long map_count;
	int size2;
	struct page **map_map;
#endif

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	/*
	 * The mem_map is allocated with big pages (e.g. 2MB on x86-64)
	 * while each usemap is tiny (tens of bytes). Interleaving the two
	 * allocations would leave a hole after every mem_map, so allocate
	 * all the usemaps up front and only then the mem_maps, keeping the
	 * big allocations contiguous.
	 *
	 * powerpc also needs to call sparse_init_one_section() right after
	 * each sparse_early_mem_map_alloc(), which is another reason
	 * usemap_map is allocated first.
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = memblock_virt_alloc(size, 0);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");

	/* Find the first present section and its node. */
	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid_begin = sparse_early_nid(ms);
		pnum_begin = pnum;
		break;
	}
	usemap_count = 1;
	for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;
		int nodeid;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid = sparse_early_nid(ms);
		if (nodeid == nodeid_begin) {
			usemap_count++;
			continue;
		}
		/* Allocate usemaps for sections pnum_begin .. pnum - 1. */
		sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, pnum,
						usemap_count, nodeid_begin);
		/* Start a new per-node run. */
		nodeid_begin = nodeid;
		pnum_begin = pnum;
		usemap_count = 1;
	}
	/* The last run. */
	sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
					usemap_count, nodeid_begin);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
	map_map = memblock_virt_alloc(size2, 0);
	if (!map_map)
		panic("can not allocate map_map\n");

	/* Repeat the per-node batching, this time for the mem_maps. */
	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid_begin = sparse_early_nid(ms);
		pnum_begin = pnum;
		break;
	}
	map_count = 1;
	for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;
		int nodeid;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid = sparse_early_nid(ms);
		if (nodeid == nodeid_begin) {
			map_count++;
			continue;
		}
		sparse_early_mem_maps_alloc_node(map_map, pnum_begin, pnum,
						 map_count, nodeid_begin);
		nodeid_begin = nodeid;
		pnum_begin = pnum;
		map_count = 1;
	}
	sparse_early_mem_maps_alloc_node(map_map, pnum_begin, NR_MEM_SECTIONS,
					 map_count, nodeid_begin);
#endif

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;

		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
		map = map_map[pnum];
#else
		map = sparse_early_mem_map_alloc(pnum);
#endif
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
					usemap);
	}

	vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	memblock_free_early(__pa(map_map), size2);
#endif
	memblock_free_early(__pa(usemap_map), size);
}
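
/*
 * Both batching passes in sparse_init() follow the same pattern: walk
 * the present sections and, each time the early nid changes, hand the
 * whole run of same-node sections to one allocator call (usemaps
 * first, then mem_maps).
 */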

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
		struct vmem_altmap *altmap)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid, altmap);
}
static void __kfree_section_memmap(struct page *memmap,
		struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end, altmap);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end, NULL);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#else /* !CONFIG_SPARSEMEM_VMEMMAP */
static struct page *__kmalloc_section_memmap(void)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	return ret;
}
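
/*
 * __kmalloc_section_memmap() prefers physically contiguous pages and
 * falls back to vmalloc; __kfree_section_memmap() below tells the two
 * cases apart with is_vmalloc_addr().
 */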

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
		struct vmem_altmap *altmap)
{
	return __kmalloc_section_memmap();
}

static void __kfree_section_memmap(struct page *memmap,
		struct vmem_altmap *altmap)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * PAGES_PER_SECTION));
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->freelist;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page_private(page);

		/*
		 * When this function is called, the section being removed
		 * is already logically offline, so all of its pages are
		 * isolated from the page allocator. If the section's memmap
		 * happens to live in the section itself, it must not be
		 * freed here: the page allocator could hand those pages out
		 * again just before the memory is physically removed.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * Returns the number of new sections whose mem_map was set up (so 1 on
 * success), -EEXIST if the section was already present, or another
 * negative errno on failure. On any result <= 0 the memmap and usemap
 * allocated below have already been freed.
 */
int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
		struct vmem_altmap *altmap)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking for this part: sparse_index_init() does its own,
	 * and it allocates with kmalloc, which may sleep.
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, altmap);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap, altmap);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	memset(memmap, 0, sizeof(struct page) * PAGES_PER_SECTION);

	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap, altmap);
	}
	return ret;
}
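
/*
 * Illustrative caller (a sketch, not from this file): the memory
 * hotplug core adds a range one section at a time, roughly
 *
 *	ret = sparse_add_one_section(zone, phys_start_pfn, altmap);
 *	if (ret < 0 && ret != -EEXIST)
 *		return ret;
 */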

#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
	int i;

	if (!memmap)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (PageHWPoison(&memmap[i])) {
			atomic_long_sub(1, &num_poisoned_pages);
			ClearPageHWPoison(&memmap[i]);
		}
	}
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_section_usemap(struct page *memmap, unsigned long *usemap,
		struct vmem_altmap *altmap)
{
	struct page *usemap_page;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if allocation came from hot-plug-add
	 */
	if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap, altmap);
		return;
	}

	/*
	 * The usemap came from bootmem. This is packed with other usemaps
	 * on the section which has pgdat at boot time. Just keep it as is
	 * for now.
	 */
	if (memmap)
		free_map_bootmem(memmap);
}

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
		unsigned long map_offset, struct vmem_altmap *altmap)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL, flags;
	struct pglist_data *pgdat = zone->zone_pgdat;

	pgdat_resize_lock(pgdat, &flags);
	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}
	pgdat_resize_unlock(pgdat, &flags);

	clear_hwpoisoned_pages(memmap + map_offset,
			       PAGES_PER_SECTION - map_offset);
	free_section_usemap(memmap, usemap, altmap);
}
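
/*
 * Note on map_offset above (a summary, not from the original file):
 * when a vmem_altmap backs the section, the first map_offset pfns are
 * the altmap's reserved region holding the memmap itself, so hwpoison
 * state is only cleared for pfns that had real, usable struct pages.
 */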
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */