// SPDX-License-Identifier: GPL-2.0
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
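
/*
 * Illustrative note (not from the original source): with SPARSEMEM_EXTREME
 * the root array is allocated at boot and each root is populated on demand,
 * so a section lookup is a two-level dereference, roughly
 *
 *	mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]
 *
 * which is what __nr_to_section() in <linux/mmzone.h> does. In the
 * !SPARSEMEM_EXTREME case the whole table is a static two-dimensional
 * array and only the second index varies at runtime.
 */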

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		section = kzalloc_node(array_size, GFP_KERNEL, nid);
	} else {
		section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
					      nid);
		if (!section)
			panic("%s: Failed to allocate %lu bytes nid=%d\n",
			      __func__, array_size, nid);
	}

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	/*
	 * An existing section is possible in the sub-section hotplug
	 * case. First hot-add instantiates, follow-on hot-add reuses
	 * the existing section.
	 *
	 * The mem_hotplug_lock resolves the apparent race below.
	 */
	if (mem_section[root])
		return 0;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
unsigned long __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root = NULL;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	VM_BUG_ON(!root);

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
unsigned long __section_nr(struct mem_section *ms)
{
	return (unsigned long)(ms - mem_section[0]);
}
#endif

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each.  But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
unsigned long __highest_present_section_nr;
static void section_mark_present(struct mem_section *ms)
{
	unsigned long section_nr = __section_nr(ms);

	if (section_nr > __highest_present_section_nr)
		__highest_present_section_nr = section_nr;

	ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

#define for_each_present_section_nr(start, section_nr)		\
	for (section_nr = next_present_section_nr(start-1);	\
	     ((section_nr != -1) &&				\
	      (section_nr <= __highest_present_section_nr));	\
	     section_nr = next_present_section_nr(section_nr))
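
/*
 * Illustrative usage (not in the original source): walk every present
 * section starting from section 0, e.g.
 *
 *	unsigned long nr;
 *	for_each_present_section_nr(0, nr)
 *		do_something(__nr_to_section(nr));
 *
 * next_present_section_nr() returns -1 once it runs past the highest
 * present section, which is what terminates the loop above.
 */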

static inline unsigned long first_present_section_nr(void)
{
	return next_present_section_nr(-1);
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static void subsection_mask_set(unsigned long *map, unsigned long pfn,
		unsigned long nr_pages)
{
	int idx = subsection_map_index(pfn);
	int end = subsection_map_index(pfn + nr_pages - 1);

	bitmap_set(map, idx, end - idx + 1);
}
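
/*
 * Illustrative example (not in the original source): on x86_64 a section
 * spans 128M and a subsection 2M, so SUBSECTIONS_PER_SECTION is 64.
 * Calling subsection_mask_set() with pfn at a section start and
 * nr_pages covering four subsections would set bits 0..3 of the map.
 * The exact section and subsection sizes are architecture-dependent.
 */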

void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
	int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
	unsigned long nr, start_sec = pfn_to_section_nr(pfn);

	if (!nr_pages)
		return;

	for (nr = start_sec; nr <= end_sec; nr++) {
		struct mem_section *ms;
		unsigned long pfns;

		pfns = min(nr_pages, PAGES_PER_SECTION
				- (pfn & ~PAGE_SECTION_MASK));
		ms = __nr_to_section(nr);
		subsection_mask_set(ms->usage->subsection_map, pfn, pfns);

		pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,
				pfns, subsection_map_index(pfn),
				subsection_map_index(pfn + pfns - 1));

		pfn += pfns;
		nr_pages -= pfns;
	}
}
#else
void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
}
#endif

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

#ifdef CONFIG_SPARSEMEM_EXTREME
	if (unlikely(!mem_section)) {
		unsigned long size, align;

		size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
		align = 1 << (INTERNODE_CACHE_SHIFT);
		mem_section = memblock_alloc(size, align);
		if (!mem_section)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, size, align);
	}
#endif

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map) {
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_IS_ONLINE;
			section_mark_present(ms);
		}
	}
}

/*
 * Mark all memblocks as present using memory_present(). This
 * internally iterates over all memblocks and calls memory_present()
 * for each range.
 */
void __init memblocks_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		memory_present(memblock_get_region_node(reg),
			       memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
	}
}

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	unsigned long coded_mem_map =
		(unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
	BUILD_BUG_ON(SECTION_MAP_LAST_BIT > (1UL<<PFN_SECTION_SHIFT));
	BUG_ON(coded_mem_map & ~SECTION_MAP_MASK);
	return coded_mem_map;
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
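
/*
 * Illustrative round-trip (not in the original source): for a section
 * number pnum whose memmap base is "map",
 *
 *	coded = sparse_encode_mem_map(map, pnum);
 *	map   = sparse_decode_mem_map(coded, pnum);
 *
 * recovers the original pointer: encode subtracts section_nr_to_pfn(pnum)
 * (in struct page units) and decode adds it back. The low bits of the
 * stored value stay free for the SECTION_* flags.
 */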

static void __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		struct mem_section_usage *usage, unsigned long flags)
{
	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum)
		| SECTION_HAS_MEM_MAP | flags;
	ms->usage = usage;
}

static unsigned long usemap_size(void)
{
	return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}

size_t mem_section_usage_size(void)
{
	return sizeof(struct mem_section_usage) + usemap_size();
}
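
/*
 * Illustrative note (not in the original source): a mem_section_usage
 * is the fixed struct (which carries the subsection_map) followed
 * in-line by usemap_size() bytes of pageblock flags, so a single
 * allocation of mem_section_usage_size() bytes covers both for one
 * section.
 */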

#ifdef CONFIG_MEMORY_HOTREMOVE
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	struct mem_section_usage *usage;
	unsigned long goal, limit;
	int nid;

	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This relocates the usemap to
	 * the same section as the pgdat where possible to avoid this
	 * problem.
	 */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
	if (!usage && limit) {
		limit = 0;
		goto again;
	}
	return usage;
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr;
	static unsigned long old_pgdat_snr;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	/* First call */
	if (!old_usemap_snr) {
		old_usemap_snr = NR_MEM_SECTIONS;
		old_pgdat_snr = NR_MEM_SECTIONS;
	}

	usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		pr_info("node %d must be removed before remove section %ld\n",
			nid, usemap_snr);
		return;
	}

	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable section because they will just
	 * gather other removable sections for dynamic partitioning.
	 * Just notify un-removable section's number here.
	 */
	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
		usemap_snr, pgdat_snr, nid);
}
#else
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static unsigned long __init section_map_size(void)
{
	return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
}

#else
static unsigned long __init section_map_size(void)
{
	return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
}

struct page __init *__populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	unsigned long size = section_map_size();
	struct page *map = sparse_buffer_alloc(size);
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);

	if (map)
		return map;

	map = memblock_alloc_try_nid_raw(size, size, addr,
					  MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!map)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
		      __func__, size, PAGE_SIZE, nid, &addr);

	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

static void *sparsemap_buf __meminitdata;
static void *sparsemap_buf_end __meminitdata;

static inline void __meminit sparse_buffer_free(unsigned long size)
{
	WARN_ON(!sparsemap_buf || size == 0);
	memblock_free_early(__pa(sparsemap_buf), size);
}

static void __init sparse_buffer_init(unsigned long size, int nid)
{
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
	WARN_ON(sparsemap_buf);	/* forgot to call sparse_buffer_fini()? */
	/*
	 * Pre-allocated buffer is mainly used by __populate_section_memmap
	 * and we want it to be properly aligned to the section size - this is
	 * especially the case for VMEMMAP which maps memmap to PMDs
	 */
	sparsemap_buf = memblock_alloc_exact_nid_raw(size, section_map_size(),
		addr, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	sparsemap_buf_end = sparsemap_buf + size;
}

static void __init sparse_buffer_fini(void)
{
	unsigned long size = sparsemap_buf_end - sparsemap_buf;

	if (sparsemap_buf && size > 0)
		sparse_buffer_free(size);
	sparsemap_buf = NULL;
}

void * __meminit sparse_buffer_alloc(unsigned long size)
{
	void *ptr = NULL;

	if (sparsemap_buf) {
		ptr = (void *) roundup((unsigned long)sparsemap_buf, size);
		if (ptr + size > sparsemap_buf_end)
			ptr = NULL;
		else {
			/* Free redundant aligned space */
			if ((unsigned long)(ptr - sparsemap_buf) > 0)
				sparse_buffer_free((unsigned long)(ptr - sparsemap_buf));
			sparsemap_buf = ptr + size;
		}
	}
	return ptr;
}
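
/*
 * Illustrative note (not in the original source): because the bump
 * pointer above is rounded up to the allocation size, a section-sized
 * request stays section-aligned, which is what allows the VMEMMAP case
 * to back the memmap with PMD mappings. Any prefix skipped between the
 * old sparsemap_buf and the aligned ptr is handed back to memblock
 * rather than leaked.
 */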

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end)
 * And number of present sections in this node is map_count.
 */
static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count)
{
	struct mem_section_usage *usage;
	unsigned long pnum;
	struct page *map;

	usage = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid),
			mem_section_usage_size() * map_count);
	if (!usage) {
		pr_err("%s: node[%d] usemap allocation failed", __func__, nid);
		goto failed;
	}
	sparse_buffer_init(map_count * section_map_size(), nid);
	for_each_present_section_nr(pnum_begin, pnum) {
		unsigned long pfn = section_nr_to_pfn(pnum);

		if (pnum >= pnum_end)
			break;

		map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
				nid, NULL);
		if (!map) {
			pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
			       __func__, nid);
			pnum_begin = pnum;
			goto failed;
		}
		check_usemap_section_nr(nid, usage);
		sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage,
				SECTION_IS_EARLY);
		usage = (void *) usage + mem_section_usage_size();
	}
	sparse_buffer_fini();
	return;
failed:
	/* We failed to allocate, mark all the following pnums as not present */
	for_each_present_section_nr(pnum_begin, pnum) {
		struct mem_section *ms;

		if (pnum >= pnum_end)
			break;
		ms = __nr_to_section(pnum);
		ms->section_mem_map = 0;
	}
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum_begin = first_present_section_nr();
	int nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));
	unsigned long pnum_end, map_count = 1;

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	for_each_present_section_nr(pnum_begin + 1, pnum_end) {
		int nid = sparse_early_nid(__nr_to_section(pnum_end));

		if (nid == nid_begin) {
			map_count++;
			continue;
		}
		/* Init node with sections in range [pnum_begin, pnum_end) */
		sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
		nid_begin = nid;
		pnum_begin = pnum_end;
		map_count = 1;
	}
	/* cover the last node */
	sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
	vmemmap_populate_print_last();
}

#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/* onlining code should never touch invalid ranges */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map |= SECTION_IS_ONLINE;
	}
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/*
		 * TODO this needs some double checking. Offlining code makes
		 * sure to check pfn_valid but those checks might be just bogus
		 */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map &= ~SECTION_IS_ONLINE;
	}
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	return __populate_section_memmap(pfn, nr_pages, nid, altmap);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);

	vmemmap_free(start, end, altmap);
}
static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end, NULL);
}

static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
	struct mem_section *ms = __pfn_to_section(pfn);
	unsigned long *subsection_map = ms->usage
		? &ms->usage->subsection_map[0] : NULL;

	subsection_mask_set(map, pfn, nr_pages);
	if (subsection_map)
		bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);

	if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
				"section already deactivated (%#lx + %ld)\n",
				pfn, nr_pages))
		return -EINVAL;

	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return bitmap_empty(&ms->usage->subsection_map[0],
			    SUBSECTIONS_PER_SECTION);
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	unsigned long *subsection_map;
	int rc = 0;

	subsection_mask_set(map, pfn, nr_pages);

	subsection_map = &ms->usage->subsection_map[0];

	if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
		rc = -EINVAL;
	else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
		rc = -EEXIST;
	else
		bitmap_or(subsection_map, map, subsection_map,
				SUBSECTIONS_PER_SECTION);

	return rc;
}
#else
struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	return kvmalloc_node(array_size(sizeof(struct page),
					PAGES_PER_SECTION), GFP_KERNEL, nid);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	kvfree(pfn_to_page(pfn));
}

static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->freelist;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page_private(page);

		/*
		 * When this function is called, the removing section is
		 * logical offlined state. This means all pages are isolated
		 * from page allocator. If removing section's memmap is placed
		 * on the same section, it must not be freed
		 * to keep those pages in the section.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}

static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return true;
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * To deactivate a memory region, there are 3 cases to handle across
 * two configurations (SPARSEMEM_VMEMMAP={y,n}):
 *
 * 1. deactivation of a partial hot-added section (only possible in
 *    the SPARSEMEM_VMEMMAP=y case).
 *      a) section was present at memory init.
 *      b) section was hot-added post memory init.
 * 2. deactivation of a complete hot-added section.
 * 3. deactivation of a complete section from memory init.
 *
 * For 1, when subsection_map does not empty we will not be freeing the
 * usage map, but still need to free the vmemmap range.
 *
 * For 2 and 3, the SPARSEMEM_VMEMMAP={y,n} cases are unified
 */
static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	bool section_is_early = early_section(ms);
	struct page *memmap = NULL;
	bool empty;

	if (clear_subsection_map(pfn, nr_pages))
		return;

	empty = is_subsection_map_empty(ms);
	if (empty) {
		unsigned long section_nr = pfn_to_section_nr(pfn);

		/*
		 * When removing an early section, the usage map is kept (as the
		 * usage maps of other sections fall into the same page). It
		 * will be re-used when re-adding the section - which is then no
		 * longer an early section. If the usage map is PageReserved, it
		 * was allocated during boot.
		 */
		if (!PageReserved(virt_to_page(ms->usage))) {
			kfree(ms->usage);
			ms->usage = NULL;
		}
		memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
		/*
		 * Mark the section invalid so that valid_section()
		 * return false. This prevents code from dereferencing
		 * ms->usage array.
		 */
		ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
	}

	if (section_is_early && memmap)
		free_map_bootmem(memmap);
	else
		depopulate_section_memmap(pfn, nr_pages, altmap);

	if (empty)
		ms->section_mem_map = (unsigned long)NULL;
}

static struct page * __meminit section_activate(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	struct mem_section_usage *usage = NULL;
	struct page *memmap;
	int rc = 0;

	if (!ms->usage) {
		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
		if (!usage)
			return ERR_PTR(-ENOMEM);
		ms->usage = usage;
	}

	rc = fill_subsection_map(pfn, nr_pages);
	if (rc) {
		if (usage)
			ms->usage = NULL;
		kfree(usage);
		return ERR_PTR(rc);
	}

	/*
	 * The early init code does not consider partially populated
	 * initial sections, it simply assumes that memory will never be
	 * referenced.  If we hot-add memory into such a section then we
	 * do not need to populate the memmap and can simply reuse what
	 * is already there.
	 */
	if (nr_pages < PAGES_PER_SECTION && early_section(ms))
		return pfn_to_page(pfn);

	memmap = populate_section_memmap(pfn, nr_pages, nid, altmap);
	if (!memmap) {
		section_deactivate(pfn, nr_pages, altmap);
		return ERR_PTR(-ENOMEM);
	}

	return memmap;
}

/**
 * sparse_add_section - add a memory section, or populate an existing one
 * @nid: The node to add section on
 * @start_pfn: start pfn of the memory range
 * @nr_pages: number of pfns to add in the section
 * @altmap: device page map
 *
 * This is only intended for hotplug.
 *
 * Note that only VMEMMAP supports sub-section aligned hotplug,
 * the proper alignment and size are gated by check_pfn_span().
 *
 * Return:
 * * 0		- On success.
 * * -EEXIST	- Section has been present.
 * * -ENOMEM	- Out of memory.
 */
int __meminit sparse_add_section(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct mem_section *ms;
	struct page *memmap;
	int ret;

	ret = sparse_index_init(section_nr, nid);
	if (ret < 0)
		return ret;

	memmap = section_activate(nid, start_pfn, nr_pages, altmap);
	if (IS_ERR(memmap))
		return PTR_ERR(memmap);

	/*
	 * Poison uninitialized struct pages in order to catch invalid flags
	 * combinations.
	 */
	page_init_poison(memmap, sizeof(struct page) * nr_pages);

	ms = __nr_to_section(section_nr);
	set_section_nid(section_nr, nid);
	section_mark_present(ms);

	/* Align memmap to section boundary in the subsection case */
	if (section_nr_to_pfn(section_nr) != start_pfn)
		memmap = pfn_to_page(section_nr_to_pfn(section_nr));
	sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);

	return 0;
}

#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
	int i;

	/*
	 * A further optimization is to have per section refcounted
	 * num_poisoned_pages.  But that would need more space per memmap, so
	 * for now just do a quick global check to speed up this routine in the
	 * absence of bad pages.
	 */
	if (atomic_long_read(&num_poisoned_pages) == 0)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (PageHWPoison(&memmap[i])) {
			num_poisoned_pages_dec();
			ClearPageHWPoison(&memmap[i]);
		}
	}
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

void sparse_remove_section(struct mem_section *ms, unsigned long pfn,
		unsigned long nr_pages, unsigned long map_offset,
		struct vmem_altmap *altmap)
{
	clear_hwpoisoned_pages(pfn_to_page(pfn) + map_offset,
			       nr_pages - map_offset);
	section_deactivate(pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */