linux/mm/sparse.c
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section       - memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
        ____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
        ____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
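
/*
 * Sizing example (assuming common x86-64 values: MAX_PHYSMEM_BITS == 46,
 * SECTION_SIZE_BITS == 27, 4K pages, 16-byte struct mem_section):
 * NR_MEM_SECTIONS == 1 << 19 and SECTIONS_PER_ROOT == 4096 / 16 == 256,
 * so SPARSEMEM_EXTREME keeps only 2048 root pointers and allocates each
 * page-sized root lazily, while the flat array would always occupy the
 * full 8MB up front.
 */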

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

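/*
 * Example cost (assuming 1 << 19 possible sections): with
 * MAX_NUMNODES <= 256 each entry is one byte, so the table costs
 * 512KB no matter how much memory is actually present.  That is the
 * price of keeping page_to_nid() a single array index.
 */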
int page_to_nid(const struct page *page)
{
        return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
        section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
        struct mem_section *section = NULL;
        unsigned long array_size = SECTIONS_PER_ROOT *
                                   sizeof(struct mem_section);
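        /*
         * Note: for SPARSEMEM_EXTREME, SECTIONS_PER_ROOT is defined as
         * PAGE_SIZE / sizeof(struct mem_section), so array_size here is
         * exactly one page.
         */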

        if (slab_is_available()) {
                if (node_state(nid, N_HIGH_MEMORY))
                        section = kzalloc_node(array_size, GFP_KERNEL, nid);
                else
                        section = kzalloc(array_size, GFP_KERNEL);
        } else {
                section = memblock_virt_alloc_node(array_size, nid);
        }

        return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
        unsigned long root = SECTION_NR_TO_ROOT(section_nr);
        struct mem_section *section;

        if (mem_section[root])
                return -EEXIST;

        section = sparse_index_alloc(nid);
        if (!section)
                return -ENOMEM;

        mem_section[root] = section;

        return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
        return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
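/*
 * Note that this is a linear search over the section roots.  That is
 * acceptable because the callers sit on slow paths (boot and memory
 * hotplug), not on any page-allocation fast path.
 */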
int __section_nr(struct mem_section *ms)
{
        unsigned long root_nr;
        struct mem_section *root;

        for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
                root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
                if (!root)
                        continue;

                if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
                        break;
        }

        VM_BUG_ON(root_nr == NR_SECTION_ROOTS);

        return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
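/*
 * Example (assuming SECTION_NID_SHIFT == 2, its value in mmzone.h):
 * marking a section present on node 3 stores
 * (3 << 2) | SECTION_MARKED_PRESENT == 0xd in section_mem_map, and
 * sparse_early_nid() recovers 3 by shifting the flag bits back out.
 */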
static inline unsigned long sparse_encode_early_nid(int nid)
{
        return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
        return (section->section_mem_map >> SECTION_NID_SHIFT);
}

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
                                                unsigned long *end_pfn)
{
        unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
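        /*
         * Example: on x86-64 (MAX_PHYSMEM_BITS == 46, PAGE_SHIFT == 12)
         * this is 1UL << 34 pfns, i.e. sparsemem covers at most 64TB of
         * physical address space.
         */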

        /*
         * Sanity checks - do not allow an architecture to pass
         * in larger pfns than the maximum scope of sparsemem:
         */
        if (*start_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                WARN_ON_ONCE(1);
                *start_pfn = max_sparsemem_pfn;
                *end_pfn = max_sparsemem_pfn;
        } else if (*end_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                WARN_ON_ONCE(1);
                *end_pfn = max_sparsemem_pfn;
        }
}

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

        start &= PAGE_SECTION_MASK;
        mminit_validate_memmodel_limits(&start, &end);
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
                unsigned long section = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                sparse_index_init(section, nid);
                set_section_nid(section, nid);

                ms = __nr_to_section(section);
                if (!ms->section_mem_map)
                        ms->section_mem_map = sparse_encode_early_nid(nid) |
                                                        SECTION_MARKED_PRESENT;
        }
}
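
/*
 * Example (hypothetical machine): an arch with 2GB of RAM on node 0 and
 * 128MB sections would call memory_present(0, 0, 0x80000); at 32768
 * pfns per section, the 0x80000 pfns mark sections 0-15 present.
 */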

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
                                                     unsigned long end_pfn)
{
        unsigned long pfn;
        unsigned long nr_pages = 0;

        mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                if (nid != early_pfn_to_nid(pfn))
                        continue;

                if (pfn_present(pfn))
                        nr_pages += PAGES_PER_SECTION;
        }

        return nr_pages * sizeof(struct page);
}

/*
 * Subtle: we encode the real pfn into the mem_map such that the
 * identity page == coded_mem_map + pfn holds, i.e. the stored value
 * is the section's virtual mem_map biased down by the section's
 * first page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
        return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
        /* mask off the extra low bits of information */
        coded_mem_map &= SECTION_MAP_MASK;
        return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
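
/*
 * Worked example (assuming 128MB sections, i.e. 32768 pfns each): if
 * section 2 covers pfns 0x10000-0x17fff and its mem_map lives at
 * virtual address M, the encoded value is M - 0x10000.  Looking up
 * pfn 0x10003 then computes (struct page *)(M - 0x10000) + 0x10003
 * == &mem_map[3], with a single addition and no per-section subtraction.
 */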

static int __meminit sparse_init_one_section(struct mem_section *ms,
                unsigned long pnum, struct page *mem_map,
                unsigned long *pageblock_bitmap)
{
        if (!present_section(ms))
                return -EINVAL;

        ms->section_mem_map &= ~SECTION_MAP_MASK;
        ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
                                                        SECTION_HAS_MEM_MAP;
        ms->pageblock_flags = pageblock_bitmap;

        return 1;
}

unsigned long usemap_size(void)
{
        unsigned long size_bytes;
        size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
        size_bytes = roundup(size_bytes, sizeof(unsigned long));
        return size_bytes;
}
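
/*
 * Example (assuming a typical x86-64 config): 128MB sections with 2MB
 * pageblocks give 64 pageblocks per section; at NR_PAGEBLOCK_BITS == 4
 * bits each that is 256 bits, so usemap_size() returns 32 bytes.
 */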

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
        return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long size)
{
        unsigned long goal, limit;
        unsigned long *p;
        int nid;
        /*
         * A page may contain usemaps for other sections, preventing the
         * page from being freed and making a section unremovable while
         * other sections referencing the usemap remain active. Similarly,
         * a pgdat can prevent a section from being removed. If section A
         * contains a pgdat and section B contains the usemap, both
         * sections become inter-dependent. This allocates usemaps
         * from the same section as the pgdat where possible to avoid
         * this problem.
         */
        goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
        limit = goal + (1UL << PA_SECTION_SHIFT);
        nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
        p = memblock_virt_alloc_try_nid_nopanic(size,
                                                SMP_CACHE_BYTES, goal, limit,
                                                nid);
        if (!p && limit) {
                limit = 0;
                goto again;
        }
        return p;
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
        unsigned long usemap_snr, pgdat_snr;
        static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
        static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
        struct pglist_data *pgdat = NODE_DATA(nid);
        int usemap_nid;

        usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
        pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
        if (usemap_snr == pgdat_snr)
                return;

        if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
                /* skip redundant message */
                return;

        old_usemap_snr = usemap_snr;
        old_pgdat_snr = pgdat_snr;

        usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
        if (usemap_nid != nid) {
                pr_info("node %d must be removed before removing section %ld\n",
                        nid, usemap_snr);
                return;
        }
        /*
         * There is a circular dependency.
         * Some platforms allow un-removable sections because they will
         * just gather other removable sections for dynamic partitioning.
         * Just report the un-removable section's number here.
         */
        pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
                usemap_snr, pgdat_snr, nid);
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long size)
{
        return memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static void __init sparse_early_usemaps_alloc_node(void *data,
                                 unsigned long pnum_begin,
                                 unsigned long pnum_end,
                                 unsigned long usemap_count, int nodeid)
{
        void *usemap;
        unsigned long pnum;
        unsigned long **usemap_map = (unsigned long **)data;
        int size = usemap_size();

        usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
                                                          size * usemap_count);
        if (!usemap) {
                pr_warn("%s: allocation failed\n", __func__);
                return;
        }

        for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                if (!present_section_nr(pnum))
                        continue;
                usemap_map[pnum] = usemap;
                usemap += size;
                check_usemap_section_nr(nodeid, usemap_map[pnum]);
        }
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
        struct page *map;
        unsigned long size;

        map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
        if (map)
                return map;

        size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
        map = memblock_virt_alloc_try_nid(size,
                                          PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
                                          BOOTMEM_ALLOC_ACCESSIBLE, nid);
        return map;
}
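
/*
 * Populate map_map for a whole node in one go: first try the arch's
 * remap area, then one big contiguous memblock allocation covering
 * every present section, and only as a last resort fall back to
 * allocating each section's mem_map individually.
 */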
void __init sparse_mem_maps_populate_node(struct page **map_map,
                                          unsigned long pnum_begin,
                                          unsigned long pnum_end,
                                          unsigned long map_count, int nodeid)
{
        void *map;
        unsigned long pnum;
        unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

        map = alloc_remap(nodeid, size * map_count);
        if (map) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
                                continue;
                        map_map[pnum] = map;
                        map += size;
                }
                return;
        }

        size = PAGE_ALIGN(size);
        map = memblock_virt_alloc_try_nid(size * map_count,
                                          PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
                                          BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
        if (map) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
                                continue;
                        map_map[pnum] = map;
                        map += size;
                }
                return;
        }

        /* fallback */
        for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                struct mem_section *ms;

                if (!present_section_nr(pnum))
                        continue;
                map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
                if (map_map[pnum])
                        continue;
                ms = __nr_to_section(pnum);
                pr_err("%s: sparsemem memory map backing failed, some memory will not be available\n",
                       __func__);
                ms->section_mem_map = 0;
        }
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(void *data,
                                 unsigned long pnum_begin,
                                 unsigned long pnum_end,
                                 unsigned long map_count, int nodeid)
{
        struct page **map_map = (struct page **)data;
        sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
                                         map_count, nodeid);
}
#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
        struct page *map;
        struct mem_section *ms = __nr_to_section(pnum);
        int nid = sparse_early_nid(ms);

        map = sparse_mem_map_populate(pnum, nid);
        if (map)
                return map;

        pr_err("%s: sparsemem memory map backing failed, some memory will not be available\n",
               __func__);
        ms->section_mem_map = 0;
        return NULL;
}
#endif

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/**
 *  alloc_usemap_and_memmap - memory allocation for pageblock flags and vmemmap
 *  @alloc_func: function that allocates usemaps or memmaps for a run of sections
 *  @data: usemap_map for pageblock flags or map_map for vmemmap
 */
static void __init alloc_usemap_and_memmap(void (*alloc_func)
                                        (void *, unsigned long, unsigned long,
                                        unsigned long, int), void *data)
{
        unsigned long pnum;
        unsigned long map_count;
        int nodeid_begin = 0;
        unsigned long pnum_begin = 0;

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                struct mem_section *ms;

                if (!present_section_nr(pnum))
                        continue;
                ms = __nr_to_section(pnum);
                nodeid_begin = sparse_early_nid(ms);
                pnum_begin = pnum;
                break;
        }
        map_count = 1;
        for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
                struct mem_section *ms;
                int nodeid;

                if (!present_section_nr(pnum))
                        continue;
                ms = __nr_to_section(pnum);
                nodeid = sparse_early_nid(ms);
                if (nodeid == nodeid_begin) {
                        map_count++;
                        continue;
                }
                /* ok, we need to take care of pnum_begin to pnum - 1 */
                alloc_func(data, pnum_begin, pnum,
                                                map_count, nodeid_begin);
                /* new start, update count etc. */
                nodeid_begin = nodeid;
                pnum_begin = pnum;
                map_count = 1;
        }
        /* ok, last chunk */
        alloc_func(data, pnum_begin, NR_MEM_SECTIONS,
                                                map_count, nodeid_begin);
}
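
/*
 * Walk-through example: with sections 4-7 present on node 0 and 8-11
 * present on node 1, alloc_func() fires twice: once as
 * alloc_func(data, 4, 8, 4, 0) when the node number changes, and once
 * as alloc_func(data, 8, NR_MEM_SECTIONS, 4, 1) for the final chunk.
 */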

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
        unsigned long pnum;
        struct page *map;
        unsigned long *usemap;
        unsigned long **usemap_map;
        int size;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        int size2;
        struct page **map_map;
#endif

        /* see include/linux/mmzone.h 'struct mem_section' definition */
        BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));
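        /*
         * A power-of-2 struct mem_section makes SECTIONS_PER_ROOT divide
         * PAGE_SIZE exactly and lets SECTION_NR_TO_ROOT() reduce to a
         * cheap shift instead of a division.
         */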

        /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
        set_pageblock_order();

        /*
         * The mem_map is backed by big pages (2M on 64-bit x86) while a
         * usemap is much smaller than a page (around 24 bytes).
         * Allocating a 2M-aligned map and then a usemap in turn pushes
         * the next map to the following 2M boundary, so a big system
         * ends up with a lot of holes in memory.  Try to allocate the
         * 2M pages contiguously instead.
         *
         * powerpc needs to call sparse_init_one_section right after each
         * sparse_early_mem_map_alloc, so allocate usemap_map first.
         */
        size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
        usemap_map = memblock_virt_alloc(size, 0);
        if (!usemap_map)
                panic("cannot allocate usemap_map\n");
        alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
                                                        (void *)usemap_map);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
        map_map = memblock_virt_alloc(size2, 0);
        if (!map_map)
                panic("cannot allocate map_map\n");
        alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
                                                        (void *)map_map);
#endif

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                if (!present_section_nr(pnum))
                        continue;

                usemap = usemap_map[pnum];
                if (!usemap)
                        continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
                map = map_map[pnum];
#else
                map = sparse_early_mem_map_alloc(pnum);
#endif
                if (!map)
                        continue;

                sparse_init_one_section(__nr_to_section(pnum), pnum, map,
                                                                usemap);
        }

        vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        memblock_free_early(__pa(map_map), size2);
#endif
        memblock_free_early(__pa(usemap_map), size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
{
        /* This will make the necessary allocations eventually. */
        return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap)
{
        unsigned long start = (unsigned long)memmap;
        unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

        vmemmap_free(start, end);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
        unsigned long start = (unsigned long)memmap;
        unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

        vmemmap_free(start, end);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#else
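/*
 * Without vmemmap, the per-section memmap needs an ordinary kernel
 * virtual mapping: try a physically contiguous high-order page
 * allocation first, and fall back to vmalloc() when memory is too
 * fragmented for that to succeed.
 */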
static struct page *__kmalloc_section_memmap(void)
{
        struct page *page, *ret;
        unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;

        page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
        if (page)
                goto got_map_page;

        ret = vmalloc(memmap_size);
        if (ret)
                goto got_map_ptr;

        return NULL;
got_map_page:
        ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:

        return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
{
        return __kmalloc_section_memmap();
}

static void __kfree_section_memmap(struct page *memmap)
{
        if (is_vmalloc_addr(memmap))
                vfree(memmap);
        else
                free_pages((unsigned long)memmap,
                           get_order(sizeof(struct page) * PAGES_PER_SECTION));
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
        unsigned long maps_section_nr, removing_section_nr, i;
        unsigned long magic, nr_pages;
        struct page *page = virt_to_page(memmap);

        nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
                >> PAGE_SHIFT;

        for (i = 0; i < nr_pages; i++, page++) {
                magic = (unsigned long) page->lru.next;

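                /*
                 * Bootmem pages carry a "magic" type in page->lru.next
                 * (see get_page_bootmem()).  Memmap pages must have been
                 * registered as SECTION_INFO; NODE_INFO here would mean
                 * we are about to free pages backing a pgdat.
                 */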
                BUG_ON(magic == NODE_INFO);

                maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
                removing_section_nr = page->private;

                /*
                 * When this function is called, the removing section is
                 * in a logically offlined state: all of its pages have
                 * been isolated from the page allocator. If the removing
                 * section's memmap is placed on the same section, it must
                 * not be freed; if it were, the page allocator could hand
                 * it out again even though the underlying memory is about
                 * to be removed physically.
                 */
                if (maps_section_nr != removing_section_nr)
                        put_page_bootmem(page);
        }
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * Returns the number of sections whose mem_maps were properly
 * set.  If this is <= 0, the passed-in map was not consumed and
 * must be freed.
 */
int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn)
{
        unsigned long section_nr = pfn_to_section_nr(start_pfn);
        struct pglist_data *pgdat = zone->zone_pgdat;
        struct mem_section *ms;
        struct page *memmap;
        unsigned long *usemap;
        unsigned long flags;
        int ret;

        /*
         * No locking here: sparse_index_init() does its own locking,
         * and it may need to kmalloc().
         */
        ret = sparse_index_init(section_nr, pgdat->node_id);
        if (ret < 0 && ret != -EEXIST)
                return ret;
        memmap = kmalloc_section_memmap(section_nr, pgdat->node_id);
        if (!memmap)
                return -ENOMEM;
        usemap = __kmalloc_section_usemap();
        if (!usemap) {
                __kfree_section_memmap(memmap);
                return -ENOMEM;
        }

        pgdat_resize_lock(pgdat, &flags);

        ms = __pfn_to_section(start_pfn);
        if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
                ret = -EEXIST;
                goto out;
        }

        memset(memmap, 0, sizeof(struct page) * PAGES_PER_SECTION);

        ms->section_mem_map |= SECTION_MARKED_PRESENT;

        ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
        pgdat_resize_unlock(pgdat, &flags);
        if (ret <= 0) {
                kfree(usemap);
                __kfree_section_memmap(memmap);
        }
        return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
        int i;

        if (!memmap)
                return;

        for (i = 0; i < nr_pages; i++) {
                if (PageHWPoison(&memmap[i])) {
                        atomic_long_sub(1, &num_poisoned_pages);
                        ClearPageHWPoison(&memmap[i]);
                }
        }
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
        struct page *usemap_page;

        if (!usemap)
                return;

        usemap_page = virt_to_page(usemap);
        /*
         * Check to see if allocation came from hot-plug-add
         */
        if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
                kfree(usemap);
                if (memmap)
                        __kfree_section_memmap(memmap);
                return;
        }

        /*
         * The usemap came from bootmem.  It is packed together with
         * other usemaps on the section holding the pgdat, so just
         * leave it in place for now.
         */

        if (memmap)
                free_map_bootmem(memmap);
}

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
                unsigned long map_offset)
{
        struct page *memmap = NULL;
        unsigned long *usemap = NULL, flags;
        struct pglist_data *pgdat = zone->zone_pgdat;

        pgdat_resize_lock(pgdat, &flags);
        if (ms->section_mem_map) {
                usemap = ms->pageblock_flags;
                memmap = sparse_decode_mem_map(ms->section_mem_map,
                                                __section_nr(ms));
                ms->section_mem_map = 0;
                ms->pageblock_flags = NULL;
        }
        pgdat_resize_unlock(pgdat, &flags);

        clear_hwpoisoned_pages(memmap + map_offset,
                        PAGES_PER_SECTION - map_offset);
        free_section_usemap(memmap, usemap);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */