linux/mm/memory_hotplug.c
/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memremap.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/compaction.h>

#include <asm/tlbflush.h>

#include "internal.h"

/*
 * online_page_callback contains a pointer to the current page-onlining
 * function. Initially it is generic_online_page(). If required, it can be
 * changed by calling set_online_page_callback() to register a callback
 * and restore_online_page_callback() to restore the generic callback.
 */

static void generic_online_page(struct page *page);

static online_page_callback_t online_page_callback = generic_online_page;
static DEFINE_MUTEX(online_page_callback_lock);

DEFINE_STATIC_PERCPU_RWSEM(mem_hotplug_lock);

void get_online_mems(void)
{
        percpu_down_read(&mem_hotplug_lock);
}

void put_online_mems(void)
{
        percpu_up_read(&mem_hotplug_lock);
}

bool movable_node_enabled = false;

#ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
bool memhp_auto_online;
#else
bool memhp_auto_online = true;
#endif
EXPORT_SYMBOL_GPL(memhp_auto_online);

static int __init setup_memhp_default_state(char *str)
{
        if (!strcmp(str, "online"))
                memhp_auto_online = true;
        else if (!strcmp(str, "offline"))
                memhp_auto_online = false;

        return 1;
}
__setup("memhp_default_state=", setup_memhp_default_state);

void mem_hotplug_begin(void)
{
        cpus_read_lock();
        percpu_down_write(&mem_hotplug_lock);
}

void mem_hotplug_done(void)
{
        percpu_up_write(&mem_hotplug_lock);
        cpus_read_unlock();
}
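
/*
 * Illustrative usage sketch (not part of this file): readers that walk
 * memory sections can bracket the walk with the reader side of the lock
 * above, assuming some hypothetical walk_sections() helper:
 *
 *        get_online_mems();
 *        walk_sections();        // memory hotplug cannot run concurrently
 *        put_online_mems();
 *
 * Writers (the hotplug paths themselves) use mem_hotplug_begin() and
 * mem_hotplug_done(), which additionally pin CPU hotplug.
 */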

/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
        struct resource *res, *conflict;
        res = kzalloc(sizeof(struct resource), GFP_KERNEL);
        if (!res)
                return ERR_PTR(-ENOMEM);

        res->name = "System RAM";
        res->start = start;
        res->end = start + size - 1;
        res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
        conflict = request_resource_conflict(&iomem_resource, res);
        if (conflict) {
                if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
                        pr_debug("Device unaddressable memory block "
                                 "memory hotplug at %#010llx !\n",
                                 (unsigned long long)start);
                }
                pr_debug("System RAM resource %pR cannot be added\n", res);
                kfree(res);
                return ERR_PTR(-EEXIST);
        }
        return res;
}
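
/*
 * Illustrative note: register_memory_resource() returns an ERR_PTR() on
 * failure rather than NULL, so callers are expected to test the result
 * with IS_ERR()/PTR_ERR(), as add_memory() below does:
 *
 *        res = register_memory_resource(start, size);
 *        if (IS_ERR(res))
 *                return PTR_ERR(res);
 */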

static void release_memory_resource(struct resource *res)
{
        if (!res)
                return;
        release_resource(res);
        kfree(res);
        return;
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
void get_page_bootmem(unsigned long info, struct page *page,
                      unsigned long type)
{
        page->freelist = (void *)type;
        SetPagePrivate(page);
        set_page_private(page, info);
        page_ref_inc(page);
}

void put_page_bootmem(struct page *page)
{
        unsigned long type;

        type = (unsigned long) page->freelist;
        BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
               type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

        if (page_ref_dec_return(page) == 1) {
                page->freelist = NULL;
                ClearPagePrivate(page);
                set_page_private(page, 0);
                INIT_LIST_HEAD(&page->lru);
                free_reserved_page(page);
        }
}
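
/*
 * Illustrative note on the refcounting above: each get_page_bootmem()
 * takes one reference on top of the page's initial reference, so after
 * one get the count is 2.  put_page_bootmem() drops one reference and
 * frees the page once page_ref_dec_return() reports that only the
 * initial reference (count == 1) remains:
 *
 *        get_page_bootmem(section_nr, page, SECTION_INFO);  // count: 1 -> 2
 *        ...
 *        put_page_bootmem(page);                  // count: 2 -> 1, freed
 */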

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
        unsigned long *usemap, mapsize, section_nr, i;
        struct mem_section *ms;
        struct page *page, *memmap;

        section_nr = pfn_to_section_nr(start_pfn);
        ms = __nr_to_section(section_nr);

        /* Get section's memmap address */
        memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

        /*
         * Get page for the memmap's phys address
         * XXX: need more consideration for sparse_vmemmap...
         */
        page = virt_to_page(memmap);
        mapsize = sizeof(struct page) * PAGES_PER_SECTION;
        mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

        /* remember memmap's page */
        for (i = 0; i < mapsize; i++, page++)
                get_page_bootmem(section_nr, page, SECTION_INFO);

        usemap = ms->pageblock_flags;
        page = virt_to_page(usemap);

        mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

        for (i = 0; i < mapsize; i++, page++)
                get_page_bootmem(section_nr, page, MIX_SECTION_INFO);

}
#else /* CONFIG_SPARSEMEM_VMEMMAP */
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
        unsigned long *usemap, mapsize, section_nr, i;
        struct mem_section *ms;
        struct page *page, *memmap;

        section_nr = pfn_to_section_nr(start_pfn);
        ms = __nr_to_section(section_nr);

        memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

        register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);

        usemap = ms->pageblock_flags;
        page = virt_to_page(usemap);

        mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

        for (i = 0; i < mapsize; i++, page++)
                get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
{
        unsigned long i, pfn, end_pfn, nr_pages;
        int node = pgdat->node_id;
        struct page *page;

        nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
        page = virt_to_page(pgdat);

        for (i = 0; i < nr_pages; i++, page++)
                get_page_bootmem(node, page, NODE_INFO);

        pfn = pgdat->node_start_pfn;
        end_pfn = pgdat_end_pfn(pgdat);

        /* register section info */
        for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                /*
                 * Some platforms can assign the same pfn to multiple nodes,
                 * i.e. to node0 as well as nodeN. To avoid registering a pfn
                 * against multiple nodes, check that this pfn does not
                 * already reside in some other node.
                 */
                if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
                        register_page_bootmem_info_section(pfn);
        }
}
#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */

static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
                struct vmem_altmap *altmap, bool want_memblock)
{
        int ret;

        if (pfn_valid(phys_start_pfn))
                return -EEXIST;

        ret = sparse_add_one_section(NODE_DATA(nid), phys_start_pfn, altmap);
        if (ret < 0)
                return ret;

        if (!want_memblock)
                return 0;

        return hotplug_memory_register(nid, __pfn_to_section(phys_start_pfn));
}

/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, unsigned long phys_start_pfn,
                unsigned long nr_pages, struct vmem_altmap *altmap,
                bool want_memblock)
{
        unsigned long i;
        int err = 0;
        int start_sec, end_sec;

        /* during mem_map initialization, align the hot-added range to section boundaries */
        start_sec = pfn_to_section_nr(phys_start_pfn);
        end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

        if (altmap) {
                /*
                 * Validate altmap is within bounds of the total request
                 */
                if (altmap->base_pfn != phys_start_pfn
                                || vmem_altmap_offset(altmap) > nr_pages) {
                        pr_warn_once("memory add fail, invalid altmap\n");
                        err = -EINVAL;
                        goto out;
                }
                altmap->alloc = 0;
        }

        for (i = start_sec; i <= end_sec; i++) {
                err = __add_section(nid, section_nr_to_pfn(i), altmap,
                                want_memblock);

                /*
                 * -EEXIST is finally dealt with by the ioresource collision
                 * check; see add_memory() => register_memory_resource().
                 * A warning is printed if there is a collision.
                 */
                if (err && (err != -EEXIST))
                        break;
                err = 0;
                cond_resched();
        }
        vmemmap_populate_print_last();
out:
        return err;
}
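
/*
 * Illustrative only: an architecture's arch_add_memory() is the expected
 * caller of __add_pages().  A minimal sketch, matching the call made from
 * add_memory_resource() below (details are arch specific):
 *
 *        int arch_add_memory(int nid, u64 start, u64 size,
 *                            struct vmem_altmap *altmap, bool want_memblock)
 *        {
 *                unsigned long start_pfn = start >> PAGE_SHIFT;
 *                unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *                // set up the arch side (e.g. direct mapping) first, then:
 *                return __add_pages(nid, start_pfn, nr_pages, altmap,
 *                                   want_memblock);
 *        }
 */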

#ifdef CONFIG_MEMORY_HOTREMOVE
/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
                                     unsigned long start_pfn,
                                     unsigned long end_pfn)
{
        struct mem_section *ms;

        for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
                ms = __pfn_to_section(start_pfn);

                if (unlikely(!valid_section(ms)))
                        continue;

                if (unlikely(pfn_to_nid(start_pfn) != nid))
                        continue;

                if (zone && zone != page_zone(pfn_to_page(start_pfn)))
                        continue;

                return start_pfn;
        }

        return 0;
}

/* find the biggest valid pfn in the range [start_pfn, end_pfn). */
static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
                                    unsigned long start_pfn,
                                    unsigned long end_pfn)
{
        struct mem_section *ms;
        unsigned long pfn;

        /* pfn is the end pfn of a memory section. */
        pfn = end_pfn - 1;
        for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
                ms = __pfn_to_section(pfn);

                if (unlikely(!valid_section(ms)))
                        continue;

                if (unlikely(pfn_to_nid(pfn) != nid))
                        continue;

                if (zone && zone != page_zone(pfn_to_page(pfn)))
                        continue;

                return pfn;
        }

        return 0;
}

static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
                             unsigned long end_pfn)
{
        unsigned long zone_start_pfn = zone->zone_start_pfn;
        unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
        unsigned long zone_end_pfn = z;
        unsigned long pfn;
        struct mem_section *ms;
        int nid = zone_to_nid(zone);

        zone_span_writelock(zone);
        if (zone_start_pfn == start_pfn) {
                /*
                 * If the section being removed is the smallest section in the
                 * zone, we need to shrink zone->zone_start_pfn and
                 * zone->spanned_pages. In this case, find the second smallest
                 * valid mem_section and shrink the zone to start there.
                 */
                pfn = find_smallest_section_pfn(nid, zone, end_pfn,
                                                zone_end_pfn);
                if (pfn) {
                        zone->zone_start_pfn = pfn;
                        zone->spanned_pages = zone_end_pfn - pfn;
                }
        } else if (zone_end_pfn == end_pfn) {
                /*
                 * If the section being removed is the biggest section in the
                 * zone, we need to shrink zone->spanned_pages. In this case,
                 * find the second biggest valid mem_section and shrink the
                 * zone to end there.
                 */
                pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
                                               start_pfn);
                if (pfn)
                        zone->spanned_pages = pfn - zone_start_pfn + 1;
        }

        /*
         * If the section is neither the biggest nor the smallest mem_section
         * in the zone, removing it only creates a hole in the zone, so we
         * need not change the zone's span. But the zone may now consist of
         * nothing but holes, so check whether any valid section remains.
         */
        pfn = zone_start_pfn;
        for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
                ms = __pfn_to_section(pfn);

                if (unlikely(!valid_section(ms)))
                        continue;

                if (page_zone(pfn_to_page(pfn)) != zone)
                        continue;

                /* Skip over the section being removed */
                if (start_pfn == pfn)
                        continue;

                /* If we find a valid section, we have nothing to do */
                zone_span_writeunlock(zone);
                return;
        }

        /* The zone has no valid section */
        zone->zone_start_pfn = 0;
        zone->spanned_pages = 0;
        zone_span_writeunlock(zone);
}
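
/*
 * Illustrative worked example with made-up numbers: assume
 * PAGES_PER_SECTION == 0x8000 and a zone spanning [0x10000, 0x40000).
 * Removing the first section [0x10000, 0x18000) takes the first branch
 * above; if find_smallest_section_pfn() returns 0x18000, the zone becomes
 * zone_start_pfn == 0x18000 and spanned_pages == 0x28000.  If no valid
 * section remains anywhere in the zone, the final loop falls through and
 * the zone span is reset to empty.
 */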

static void shrink_pgdat_span(struct pglist_data *pgdat,
                              unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
        unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
        unsigned long pgdat_end_pfn = p;
        unsigned long pfn;
        struct mem_section *ms;
        int nid = pgdat->node_id;

        if (pgdat_start_pfn == start_pfn) {
                /*
                 * If the section being removed is the smallest section in the
                 * pgdat, we need to shrink pgdat->node_start_pfn and
                 * pgdat->node_spanned_pages. In this case, find the second
                 * smallest valid mem_section and shrink the pgdat to start
                 * there.
                 */
                pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
                                                pgdat_end_pfn);
                if (pfn) {
                        pgdat->node_start_pfn = pfn;
                        pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
                }
        } else if (pgdat_end_pfn == end_pfn) {
                /*
                 * If the section being removed is the biggest section in the
                 * pgdat, we need to shrink pgdat->node_spanned_pages. In this
                 * case, find the second biggest valid mem_section and shrink
                 * the pgdat to end there.
                 */
                pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
                                               start_pfn);
                if (pfn)
                        pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
        }

        /*
         * If the section is neither the biggest nor the smallest mem_section
         * in the pgdat, removing it only creates a hole in the pgdat, so we
         * need not change the pgdat's span. But the pgdat may now consist of
         * nothing but holes, so check whether any valid section remains.
         */
        pfn = pgdat_start_pfn;
        for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
                ms = __pfn_to_section(pfn);

                if (unlikely(!valid_section(ms)))
                        continue;

                if (pfn_to_nid(pfn) != nid)
                        continue;

                /* Skip over the section being removed */
                if (start_pfn == pfn)
                        continue;

                /* If we find a valid section, we have nothing to do */
                return;
        }

        /* The pgdat has no valid section */
        pgdat->node_start_pfn = 0;
        pgdat->node_spanned_pages = 0;
}

static void __remove_zone(struct zone *zone, unsigned long start_pfn)
{
        struct pglist_data *pgdat = zone->zone_pgdat;
        int nr_pages = PAGES_PER_SECTION;
        unsigned long flags;

        pgdat_resize_lock(zone->zone_pgdat, &flags);
        shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
        shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
        pgdat_resize_unlock(zone->zone_pgdat, &flags);
}

static int __remove_section(struct zone *zone, struct mem_section *ms,
                unsigned long map_offset, struct vmem_altmap *altmap)
{
        unsigned long start_pfn;
        int scn_nr;
        int ret = -EINVAL;

        if (!valid_section(ms))
                return ret;

        ret = unregister_memory_section(ms);
        if (ret)
                return ret;

        scn_nr = __section_nr(ms);
        start_pfn = section_nr_to_pfn((unsigned long)scn_nr);
        __remove_zone(zone, start_pfn);

        sparse_remove_one_section(zone, ms, map_offset, altmap);
        return 0;
}

/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 * @altmap: alternative device page map or %NULL if default memmap is used
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. The caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
                 unsigned long nr_pages, struct vmem_altmap *altmap)
{
        unsigned long i;
        unsigned long map_offset = 0;
        int sections_to_remove, ret = 0;

        /* In the ZONE_DEVICE case the device driver owns the memory region */
        if (is_dev_zone(zone)) {
                if (altmap)
                        map_offset = vmem_altmap_offset(altmap);
        } else {
                resource_size_t start, size;

                start = phys_start_pfn << PAGE_SHIFT;
                size = nr_pages * PAGE_SIZE;

                ret = release_mem_region_adjustable(&iomem_resource, start,
                                        size);
                if (ret) {
                        resource_size_t endres = start + size - 1;

                        pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
                                        &start, &endres, ret);
                }
        }

        clear_zone_contiguous(zone);

        /*
         * We can only remove entire sections
         */
        BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
        BUG_ON(nr_pages % PAGES_PER_SECTION);

        sections_to_remove = nr_pages / PAGES_PER_SECTION;
        for (i = 0; i < sections_to_remove; i++) {
                unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;

                ret = __remove_section(zone, __pfn_to_section(pfn), map_offset,
                                altmap);
                map_offset = 0;
                if (ret)
                        break;
        }

        set_zone_contiguous(zone);

        return ret;
}
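
/*
 * Illustrative only: the expected caller is an architecture's
 * arch_remove_memory(), roughly (details are arch specific):
 *
 *        unsigned long start_pfn = start >> PAGE_SHIFT;
 *        struct zone *zone = page_zone(pfn_to_page(start_pfn));
 *
 *        ret = __remove_pages(zone, start_pfn, size >> PAGE_SHIFT, altmap);
 *        // then tear down the arch side (e.g. direct mapping)
 */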
#endif /* CONFIG_MEMORY_HOTREMOVE */

int set_online_page_callback(online_page_callback_t callback)
{
        int rc = -EINVAL;

        get_online_mems();
        mutex_lock(&online_page_callback_lock);

        if (online_page_callback == generic_online_page) {
                online_page_callback = callback;
                rc = 0;
        }

        mutex_unlock(&online_page_callback_lock);
        put_online_mems();

        return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
        int rc = -EINVAL;

        get_online_mems();
        mutex_lock(&online_page_callback_lock);

        if (online_page_callback == callback) {
                online_page_callback = generic_online_page;
                rc = 0;
        }

        mutex_unlock(&online_page_callback_lock);
        put_online_mems();

        return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);
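
/*
 * Illustrative only: a driver that wants to intercept page onlining (as
 * some balloon drivers do) pairs the two calls above.  A sketch with a
 * hypothetical my_online_page() callback:
 *
 *        static void my_online_page(struct page *page)
 *        {
 *                // decide whether to hand the page to the page allocator
 *                __online_page_set_limits(page);
 *                __online_page_increment_counters(page);
 *                __online_page_free(page);
 *        }
 *
 *        rc = set_online_page_callback(&my_online_page);
 *        ...
 *        restore_online_page_callback(&my_online_page);
 */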

void __online_page_set_limits(struct page *page)
{
}
EXPORT_SYMBOL_GPL(__online_page_set_limits);

void __online_page_increment_counters(struct page *page)
{
        adjust_managed_page_count(page, 1);
}
EXPORT_SYMBOL_GPL(__online_page_increment_counters);

void __online_page_free(struct page *page)
{
        __free_reserved_page(page);
}
EXPORT_SYMBOL_GPL(__online_page_free);

static void generic_online_page(struct page *page)
{
        __online_page_set_limits(page);
        __online_page_increment_counters(page);
        __online_page_free(page);
}

static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
                        void *arg)
{
        unsigned long i;
        unsigned long onlined_pages = *(unsigned long *)arg;
        struct page *page;

        if (PageReserved(pfn_to_page(start_pfn)))
                for (i = 0; i < nr_pages; i++) {
                        page = pfn_to_page(start_pfn + i);
                        (*online_page_callback)(page);
                        onlined_pages++;
                }

        online_mem_sections(start_pfn, start_pfn + nr_pages);

        *(unsigned long *)arg = onlined_pages;
        return 0;
}

/* check which states in node_states[] will change when onlining memory */
static void node_states_check_changes_online(unsigned long nr_pages,
        struct zone *zone, struct memory_notify *arg)
{
        int nid = zone_to_nid(zone);
        enum zone_type zone_last = ZONE_NORMAL;

        /*
         * If we have HIGHMEM or a movable node, node_states[N_NORMAL_MEMORY]
         * contains nodes which have zones of 0...ZONE_NORMAL, so set
         * zone_last to ZONE_NORMAL.
         *
         * If we have neither HIGHMEM nor a movable node,
         * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
         * 0...ZONE_MOVABLE, so set zone_last to ZONE_MOVABLE.
         */
        if (N_MEMORY == N_NORMAL_MEMORY)
                zone_last = ZONE_MOVABLE;

        /*
         * If the memory to be onlined is in a zone of 0...zone_last, and
         * the zones of 0...zone_last had no memory before onlining, we will
         * need to add the node to node_states[N_NORMAL_MEMORY] after
         * the memory is onlined.
         */
        if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY))
                arg->status_change_nid_normal = nid;
        else
                arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
        /*
         * If we have a movable node, node_states[N_HIGH_MEMORY]
         * contains nodes which have zones of 0...ZONE_HIGHMEM, so set
         * zone_last to ZONE_HIGHMEM.
         *
         * If we don't have a movable node, node_states[N_HIGH_MEMORY]
         * contains nodes which have zones of 0...ZONE_MOVABLE, so set
         * zone_last to ZONE_MOVABLE.
         */
        zone_last = ZONE_HIGHMEM;
        if (N_MEMORY == N_HIGH_MEMORY)
                zone_last = ZONE_MOVABLE;

        if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY))
                arg->status_change_nid_high = nid;
        else
                arg->status_change_nid_high = -1;
#else
        arg->status_change_nid_high = arg->status_change_nid_normal;
#endif

        /*
         * If the node had no memory before onlining, we will need to add
         * the node to node_states[N_MEMORY] after the memory is onlined.
         */
        if (!node_state(nid, N_MEMORY))
                arg->status_change_nid = nid;
        else
                arg->status_change_nid = -1;
}

static void node_states_set_node(int node, struct memory_notify *arg)
{
        if (arg->status_change_nid_normal >= 0)
                node_set_state(node, N_NORMAL_MEMORY);

        if (arg->status_change_nid_high >= 0)
                node_set_state(node, N_HIGH_MEMORY);

        node_set_state(node, N_MEMORY);
}

static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
                unsigned long nr_pages)
{
        unsigned long old_end_pfn = zone_end_pfn(zone);

        if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
                zone->zone_start_pfn = start_pfn;

        zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
}

static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
                                     unsigned long nr_pages)
{
        unsigned long old_end_pfn = pgdat_end_pfn(pgdat);

        if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
                pgdat->node_start_pfn = start_pfn;

        pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
}

void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
                unsigned long nr_pages, struct vmem_altmap *altmap)
{
        struct pglist_data *pgdat = zone->zone_pgdat;
        int nid = pgdat->node_id;
        unsigned long flags;

        if (zone_is_empty(zone))
                init_currently_empty_zone(zone, start_pfn, nr_pages);

        clear_zone_contiguous(zone);

        /* TODO Huh pgdat is irqsave while zone is not. It used to be like that before */
        pgdat_resize_lock(pgdat, &flags);
        zone_span_writelock(zone);
        resize_zone_range(zone, start_pfn, nr_pages);
        zone_span_writeunlock(zone);
        resize_pgdat_range(pgdat, start_pfn, nr_pages);
        pgdat_resize_unlock(pgdat, &flags);

        /*
         * TODO: now we have a visible range of pages which are not associated
         * with their zone properly. Not nice, but set_pfnblock_flags_mask()
         * expects the zone to span the pfn range. All the pages in the range
         * are reserved so nobody should be touching them, so we should be safe.
         */
        memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
                        MEMMAP_HOTPLUG, altmap);

        set_zone_contiguous(zone);
}

/*
 * Returns a default kernel memory zone for the given pfn range.
 * If no kernel zone covers this pfn range, it will automatically fall back
 * to ZONE_NORMAL.
 */
static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn,
                unsigned long nr_pages)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        int zid;

        for (zid = 0; zid <= ZONE_NORMAL; zid++) {
                struct zone *zone = &pgdat->node_zones[zid];

                if (zone_intersects(zone, start_pfn, nr_pages))
                        return zone;
        }

        return &pgdat->node_zones[ZONE_NORMAL];
}

static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
                unsigned long nr_pages)
{
        struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn,
                        nr_pages);
        struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
        bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
        bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);

        /*
         * We inherit the existing zone in the simple case where zones do not
         * overlap in the given range.
         */
        if (in_kernel ^ in_movable)
                return (in_kernel) ? kernel_zone : movable_zone;

        /*
         * If the range doesn't belong to any zone, or two zones overlap in
         * the given range, then we use the movable zone only if movable_node
         * is enabled, because we always online to a kernel zone by default.
         */
        return movable_node_enabled ? movable_zone : kernel_zone;
}

struct zone * zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
                unsigned long nr_pages)
{
        if (online_type == MMOP_ONLINE_KERNEL)
                return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);

        if (online_type == MMOP_ONLINE_MOVABLE)
                return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];

        return default_zone_for_pfn(nid, start_pfn, nr_pages);
}
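
/*
 * Illustrative summary of how the cases above play out:
 *
 *        MMOP_ONLINE_KERNEL  -> a kernel zone: ZONE_NORMAL, unless another
 *                               kernel zone already intersects the range
 *        MMOP_ONLINE_MOVABLE -> ZONE_MOVABLE unconditionally
 *        otherwise (keep)    -> default_zone_for_pfn(): inherit the single
 *                               intersecting zone if unambiguous, else a
 *                               kernel zone, or ZONE_MOVABLE when
 *                               movable_node is enabled
 */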

/*
 * Associates the given pfn range with the given node and the zone appropriate
 * for the given online type.
 */
static struct zone * __meminit move_pfn_range(int online_type, int nid,
                unsigned long start_pfn, unsigned long nr_pages)
{
        struct zone *zone;

        zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
        move_pfn_range_to_zone(zone, start_pfn, nr_pages, NULL);
        return zone;
}

/* Must be protected by mem_hotplug_begin() or a device_lock */
int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
{
        unsigned long flags;
        unsigned long onlined_pages = 0;
        struct zone *zone;
        int need_zonelists_rebuild = 0;
        int nid;
        int ret;
        struct memory_notify arg;
        struct memory_block *mem;

        /*
         * We can't use pfn_to_nid() because the nid might be stored in
         * struct page, which is not yet initialized. Instead, find the nid
         * from the memory block.
         */
        mem = find_memory_block(__pfn_to_section(pfn));
        nid = mem->nid;

        /* associate pfn range with the zone */
        zone = move_pfn_range(online_type, nid, pfn, nr_pages);

        arg.start_pfn = pfn;
        arg.nr_pages = nr_pages;
        node_states_check_changes_online(nr_pages, zone, &arg);

        ret = memory_notify(MEM_GOING_ONLINE, &arg);
        ret = notifier_to_errno(ret);
        if (ret)
                goto failed_addition;

        /*
         * If this zone is not populated, then it is not in the zonelist.
         * This means the page allocator ignores this zone.
         * So, the zonelist must be updated after onlining.
         */
        if (!populated_zone(zone)) {
                need_zonelists_rebuild = 1;
                setup_zone_pageset(zone);
        }

        ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
                online_pages_range);
        if (ret) {
                if (need_zonelists_rebuild)
                        zone_pcp_reset(zone);
                goto failed_addition;
        }

        zone->present_pages += onlined_pages;

        pgdat_resize_lock(zone->zone_pgdat, &flags);
        zone->zone_pgdat->node_present_pages += onlined_pages;
        pgdat_resize_unlock(zone->zone_pgdat, &flags);

        if (onlined_pages) {
                node_states_set_node(nid, &arg);
                if (need_zonelists_rebuild)
                        build_all_zonelists(NULL);
                else
                        zone_pcp_update(zone);
        }

        init_per_zone_wmark_min();

        if (onlined_pages) {
                kswapd_run(nid);
                kcompactd_run(nid);
        }

        vm_total_pages = nr_free_pagecache_pages();

        writeback_set_ratelimit();

        if (onlined_pages)
                memory_notify(MEM_ONLINE, &arg);
        return 0;

failed_addition:
        pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
                 (unsigned long long) pfn << PAGE_SHIFT,
                 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
        memory_notify(MEM_CANCEL_ONLINE, &arg);
        return ret;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

static void reset_node_present_pages(pg_data_t *pgdat)
{
        struct zone *z;

        for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
                z->present_pages = 0;

        pgdat->node_present_pages = 0;
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
        struct pglist_data *pgdat;
        unsigned long zones_size[MAX_NR_ZONES] = {0};
        unsigned long zholes_size[MAX_NR_ZONES] = {0};
        unsigned long start_pfn = PFN_DOWN(start);

        pgdat = NODE_DATA(nid);
        if (!pgdat) {
                pgdat = arch_alloc_nodedata(nid);
                if (!pgdat)
                        return NULL;

                arch_refresh_nodedata(nid, pgdat);
        } else {
                /*
                 * Reset the nr_zones, order and classzone_idx before reuse.
                 * Note that kswapd will init kswapd_classzone_idx properly
                 * when it starts in the near future.
                 */
                pgdat->nr_zones = 0;
                pgdat->kswapd_order = 0;
                pgdat->kswapd_classzone_idx = 0;
        }

        /* we can use NODE_DATA(nid) from here */

        /* init node's zones as empty zones, we don't have any present pages. */
        free_area_init_node(nid, zones_size, start_pfn, zholes_size);
        pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);

        /*
         * The node we allocated has no zone fallback lists. To avoid
         * accessing an uninitialized zonelist, build it here.
         */
        build_all_zonelists(pgdat);

        /*
         * zone->managed_pages is set to an approximate value in
         * free_area_init_core(), which would cause
         * /sys/devices/system/node/nodeX/meminfo to report wrong data.
         * So reset it to 0 before any memory is onlined.
         */
        reset_node_managed_pages(pgdat);

        /*
         * When memory is hot-added, all the memory is in offline state. So
         * clear all zones' present_pages because they will be updated in
         * online_pages() and offline_pages().
         */
        reset_node_present_pages(pgdat);

        return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
        arch_refresh_nodedata(nid, NULL);
        free_percpu(pgdat->per_cpu_nodestats);
        arch_free_nodedata(pgdat);
        return;
}


/**
 * try_online_node - online a node if offlined
 * @nid: the node ID
 *
 * Called by cpu_up() to online a node that has no onlined memory.
 */
int try_online_node(int nid)
{
        pg_data_t       *pgdat;
        int     ret;

        if (node_online(nid))
                return 0;

        mem_hotplug_begin();
        pgdat = hotadd_new_pgdat(nid, 0);
        if (!pgdat) {
                pr_err("Cannot online node %d due to NULL pgdat\n", nid);
                ret = -ENOMEM;
                goto out;
        }
        node_set_online(nid);
        ret = register_one_node(nid);
        BUG_ON(ret);
out:
        mem_hotplug_done();
        return ret;
}

static int check_hotplug_memory_range(u64 start, u64 size)
{
        unsigned long block_sz = memory_block_size_bytes();
        u64 block_nr_pages = block_sz >> PAGE_SHIFT;
        u64 nr_pages = size >> PAGE_SHIFT;
        u64 start_pfn = PFN_DOWN(start);

        /* memory range must be block size aligned */
        if (!nr_pages || !IS_ALIGNED(start_pfn, block_nr_pages) ||
            !IS_ALIGNED(nr_pages, block_nr_pages)) {
                pr_err("Block size [%#lx] unaligned hotplug range: start %#llx, size %#llx",
                       block_sz, start, size);
                return -EINVAL;
        }

        return 0;
}
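
/*
 * Illustrative worked example, assuming a 128 MiB memory block size
 * (block_nr_pages == 0x8000 with 4 KiB pages):
 *
 *        start = 0x100000000, size = 0x10000000 (256 MiB) -> OK
 *        start = 0x100000000, size = 0x04000000 ( 64 MiB) -> -EINVAL
 *
 * Both the start pfn and the number of pages must be block aligned.
 */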

static int online_memory_block(struct memory_block *mem, void *arg)
{
        return device_online(&mem->dev);
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
int __ref add_memory_resource(int nid, struct resource *res, bool online)
{
        u64 start, size;
        pg_data_t *pgdat = NULL;
        bool new_pgdat;
        bool new_node;
        int ret;

        start = res->start;
        size = resource_size(res);

        ret = check_hotplug_memory_range(start, size);
        if (ret)
                return ret;

        {       /* Stupid hack to suppress address-never-null warning */
                void *p = NODE_DATA(nid);
                new_pgdat = !p;
        }

        mem_hotplug_begin();

        /*
         * Add new range to memblock so that when hotadd_new_pgdat() is called
         * to allocate new pgdat, get_pfn_range_for_nid() will be able to find
         * this new range and calculate total pages correctly.  The range will
         * be removed at hot-remove time.
         */
        memblock_add_node(start, size, nid);

        new_node = !node_online(nid);
        if (new_node) {
                pgdat = hotadd_new_pgdat(nid, start);
                ret = -ENOMEM;
                if (!pgdat)
                        goto error;
        }

        /* call arch's memory hotadd */
        ret = arch_add_memory(nid, start, size, NULL, true);

        if (ret < 0)
                goto error;

        /* We online the node here; we cannot roll back from here. */
        node_set_online(nid);

        if (new_node) {
                unsigned long start_pfn = start >> PAGE_SHIFT;
                unsigned long nr_pages = size >> PAGE_SHIFT;

                ret = __register_one_node(nid);
                if (ret)
                        goto register_fail;

                /*
                 * Link memory sections under this node. This is already
                 * done when creating memory sections in register_new_memory,
                 * but that depends on the node being registered, so offline
                 * nodes have to go through register_node.
                 * TODO: clean up this mess.
                 */
                ret = link_mem_sections(nid, start_pfn, nr_pages, false);
register_fail:
                /*
                 * If the sysfs file of the new node can't be created, CPUs
                 * on the node can't be hot-added. There is no way to roll
                 * back now, so check with BUG_ON() to catch it reluctantly.
                 */
                BUG_ON(ret);
        }

        /* create new memmap entry */
        firmware_map_add_hotplug(start, start + size, "System RAM");

        /* online pages if requested */
        if (online)
                walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
                                  NULL, online_memory_block);

        goto out;

error:
        /* rollback pgdat allocation and others */
        if (new_pgdat && pgdat)
                rollback_node_hotadd(nid, pgdat);
        memblock_remove(start, size);

out:
        mem_hotplug_done();
        return ret;
}
EXPORT_SYMBOL_GPL(add_memory_resource);

int __ref add_memory(int nid, u64 start, u64 size)
{
        struct resource *res;
        int ret;

        res = register_memory_resource(start, size);
        if (IS_ERR(res))
                return PTR_ERR(res);

        ret = add_memory_resource(nid, res, memhp_auto_online);
        if (ret < 0)
                release_memory_resource(res);
        return ret;
}
EXPORT_SYMBOL_GPL(add_memory);
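
/*
 * Illustrative only: ACPI-driven hotplug is a typical caller.  Roughly,
 * the acpi_memhotplug driver derives a nid from the ACPI proximity domain
 * and then calls, per address range:
 *
 *        nid = memory_add_physaddr_to_nid(info->start_addr);
 *        result = add_memory(nid, info->start_addr, info->length);
 */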

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 * set and the size of the free page is given by page_order(). Using this,
 * the function determines if the pageblock contains only free pages.
 * Due to buddy constraints, a free page at least the size of a pageblock
 * will be located at the start of the pageblock.
 */
static inline int pageblock_free(struct page *page)
{
        return PageBuddy(page) && page_order(page) >= pageblock_order;
}

/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
        /* Ensure the starting page is pageblock-aligned */
        BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));

        /* If the entire pageblock is free, move to the end of free page */
        if (pageblock_free(page)) {
                int order;
                /* Be careful: we don't hold any locks, so page_order() can change. */
                order = page_order(page);
                if ((order < MAX_ORDER) && (order >= pageblock_order))
                        return page + (1 << order);
        }

        return page + pageblock_nr_pages;
}

/* Checks if this range of memory is likely to be hot-removable. */
bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
        struct page *page = pfn_to_page(start_pfn);
        struct page *end_page = page + nr_pages;

        /* Check the starting page of each pageblock within the range */
        for (; page < end_page; page = next_active_pageblock(page)) {
                if (!is_pageblock_removable_nolock(page))
                        return false;
                cond_resched();
        }

        /* All pageblocks in the memory block are likely to be hot-removable */
        return true;
}
/*
 * Confirm that all pages in the range [start_pfn, end_pfn) belong to the
 * same zone. If they do, return 1 and report the valid sub-range via
 * *valid_start and *valid_end; otherwise return 0.
 */
int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
                         unsigned long *valid_start, unsigned long *valid_end)
{
        unsigned long pfn, sec_end_pfn;
        unsigned long start, end;
        struct zone *zone = NULL;
        struct page *page;
        int i;
        for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
             pfn < end_pfn;
             pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
                /* Make sure the memory section is present first */
                if (!present_section_nr(pfn_to_section_nr(pfn)))
                        continue;
                for (; pfn < sec_end_pfn && pfn < end_pfn;
                     pfn += MAX_ORDER_NR_PAGES) {
                        i = 0;
                        /* This is just a CONFIG_HOLES_IN_ZONE check. */
                        while ((i < MAX_ORDER_NR_PAGES) &&
                                !pfn_valid_within(pfn + i))
                                i++;
                        if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
                                continue;
                        page = pfn_to_page(pfn + i);
                        if (zone && page_zone(page) != zone)
                                return 0;
                        if (!zone)
                                start = pfn + i;
                        zone = page_zone(page);
                        end = pfn + MAX_ORDER_NR_PAGES;
                }
        }

        if (zone) {
                *valid_start = start;
                *valid_end = min(end, end_pfn);
                return 1;
        } else {
                return 0;
        }
}
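
/*
 * Illustrative example with made-up numbers: for a range straddling a
 * zone boundary, say [0x1f000, 0x21000) with two zones meeting at pfn
 * 0x20000, the second MAX_ORDER block yields a different page_zone() than
 * the one remembered in 'zone', so the function returns 0 and
 * __offline_pages() below rejects the range with -EINVAL.
 */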
/*
 * Scan the pfn range [start,end) for movable/migratable pages (LRU pages,
 * non-lru movable pages and hugepages). We scan by pfn because it's much
 * easier than walking over a linked list. This function returns the pfn
 * of the first found movable page if one is found, otherwise 0.
 */
static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
{
        unsigned long pfn;
        struct page *page;
        for (pfn = start; pfn < end; pfn++) {
                if (pfn_valid(pfn)) {
                        page = pfn_to_page(pfn);
                        if (PageLRU(page))
                                return pfn;
                        if (__PageMovable(page))
                                return pfn;
                        if (PageHuge(page)) {
                                if (page_huge_active(page))
                                        return pfn;
                                else
                                        pfn = round_up(pfn + 1,
                                                1 << compound_order(page)) - 1;
                        }
                }
        }
        return 0;
}

static struct page *new_node_page(struct page *page, unsigned long private)
{
        int nid = page_to_nid(page);
        nodemask_t nmask = node_states[N_MEMORY];

        /*
         * try to allocate from a different node but reuse this node if there
         * are no other online nodes to be used (e.g. we are offlining a part
         * of the only existing node)
         */
        node_clear(nid, nmask);
        if (nodes_empty(nmask))
                node_set(nid, nmask);

        return new_page_nodemask(page, nid, &nmask);
}
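
/*
 * Illustrative worked example of the nodemask logic above: with online
 * memory nodes {0, 1} and a page on node 1, nmask becomes {0} and the
 * allocation is retried off-node.  If node 1 were the only node with
 * memory, nmask would go empty, so node 1 is put back and reused.
 */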

#define NR_OFFLINE_AT_ONCE_PAGES        (256)
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;
        struct page *page;
        int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
        int not_managed = 0;
        int ret = 0;
        LIST_HEAD(source);

        for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
                if (!pfn_valid(pfn))
                        continue;
                page = pfn_to_page(pfn);

                if (PageHuge(page)) {
                        struct page *head = compound_head(page);
                        pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
                        if (compound_order(head) > PFN_SECTION_SHIFT) {
                                ret = -EBUSY;
                                break;
                        }
                        if (isolate_huge_page(page, &source))
                                move_pages -= 1 << compound_order(head);
                        continue;
                } else if (PageTransHuge(page))
                        pfn = page_to_pfn(compound_head(page))
                                + hpage_nr_pages(page) - 1;

                if (!get_page_unless_zero(page))
                        continue;
                /*
                 * We can skip free pages. And we can deal with pages on
                 * LRU and non-lru movable pages.
                 */
                if (PageLRU(page))
                        ret = isolate_lru_page(page);
                else
                        ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
                if (!ret) { /* Success */
                        put_page(page);
                        list_add_tail(&page->lru, &source);
                        move_pages--;
                        if (!__PageMovable(page))
                                inc_node_page_state(page, NR_ISOLATED_ANON +
                                                    page_is_file_cache(page));

                } else {
#ifdef CONFIG_DEBUG_VM
                        pr_alert("failed to isolate pfn %lx\n", pfn);
                        dump_page(page, "isolation failed");
#endif
                        put_page(page);
                        /*
                         * Because we don't hold the big zone->lock, we should
                         * check this again here.
                         */
                        if (page_count(page)) {
                                not_managed++;
                                ret = -EBUSY;
                                break;
                        }
                }
        }
        if (!list_empty(&source)) {
                if (not_managed) {
                        putback_movable_pages(&source);
                        goto out;
                }

                /* Allocate a new page from the nearest neighbor node */
                ret = migrate_pages(&source, new_node_page, NULL, 0,
                                        MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
                if (ret)
                        putback_movable_pages(&source);
        }
out:
        return ret;
}

/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
                        void *data)
{
        __offline_isolated_pages(start, start + nr_pages);
        return 0;
}

static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
        walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
                                offline_isolated_pages_cb);
}

/*
 * Check that all pages in the range, recorded as a memory resource, are
 * isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
                        void *data)
{
        int ret;
        long offlined = *(long *)data;
        ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
        offlined = nr_pages;
        if (!ret)
                *(long *)data += offlined;
        return ret;
}

static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
        long offlined = 0;
        int ret;

        ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
                        check_pages_isolated_cb);
        if (ret < 0)
                offlined = (long)ret;
        return offlined;
}

static int __init cmdline_parse_movable_node(char *p)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
        movable_node_enabled = true;
#else
        pr_warn("movable_node parameter depends on CONFIG_HAVE_MEMBLOCK_NODE_MAP to work properly\n");
#endif
        return 0;
}
early_param("movable_node", cmdline_parse_movable_node);

/* check which states in node_states[] will change when offlining memory */
static void node_states_check_changes_offline(unsigned long nr_pages,
                struct zone *zone, struct memory_notify *arg)
{
        struct pglist_data *pgdat = zone->zone_pgdat;
        unsigned long present_pages = 0;
        enum zone_type zt, zone_last = ZONE_NORMAL;

        /*
         * If we have HIGHMEM or a movable node, node_states[N_NORMAL_MEMORY]
         * contains nodes which have zones of 0...ZONE_NORMAL, so set
         * zone_last to ZONE_NORMAL.
         *
         * If we have neither HIGHMEM nor a movable node,
         * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
         * 0...ZONE_MOVABLE, so set zone_last to ZONE_MOVABLE.
         */
        if (N_MEMORY == N_NORMAL_MEMORY)
                zone_last = ZONE_MOVABLE;

        /*
         * Check whether node_states[N_NORMAL_MEMORY] will be changed.
         * If the memory to be offlined is in a zone of 0...zone_last,
         * and it is the last present memory there, 0...zone_last will
         * become empty after offlining, thus we can determine that we will
         * need to clear the node from node_states[N_NORMAL_MEMORY].
         */
        for (zt = 0; zt <= zone_last; zt++)
                present_pages += pgdat->node_zones[zt].present_pages;
        if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
                arg->status_change_nid_normal = zone_to_nid(zone);
        else
                arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
1521        /*
1522         * If we have a movable node, node_states[N_HIGH_MEMORY]
1523         * contains nodes which have zones of 0...ZONE_HIGHMEM,
1524         * so set zone_last to ZONE_HIGHMEM.
1525         *
1526         * If we don't have a movable node, node_states[N_HIGH_MEMORY]
1527         * contains nodes which have zones of 0...ZONE_MOVABLE,
1528         * so set zone_last to ZONE_MOVABLE.
1529         */
1530        zone_last = ZONE_HIGHMEM;
1531        if (N_MEMORY == N_HIGH_MEMORY)
1532                zone_last = ZONE_MOVABLE;
1533
1534        for (; zt <= zone_last; zt++)
1535                present_pages += pgdat->node_zones[zt].present_pages;
1536        if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
1537                arg->status_change_nid_high = zone_to_nid(zone);
1538        else
1539                arg->status_change_nid_high = -1;
1540#else
1541        arg->status_change_nid_high = arg->status_change_nid_normal;
1542#endif
1543
1544        /*
1545         * node_states[N_MEMORY] contains nodes which have 0...ZONE_MOVABLE
1546         */
1547        zone_last = ZONE_MOVABLE;
1548
1549        /*
1550         * Check whether node_states[N_MEMORY] will be changed.
1551         * If we try to offline the last present @nr_pages from the node,
1552         * we can determine that we will need to clear the node from
1553         * node_states[N_MEMORY].
1554         */
1555        for (; zt <= zone_last; zt++)
1556                present_pages += pgdat->node_zones[zt].present_pages;
1557        if (nr_pages >= present_pages)
1558                arg->status_change_nid = zone_to_nid(zone);
1559        else
1560                arg->status_change_nid = -1;
1561}
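1562
/*
 * A worked example (an illustration, not from the original source): if
 * the nr_pages being offlined are the last pages present on the node,
 * every present_pages sum above is reached, so status_change_nid (and,
 * where applicable, the _normal/_high variants) is set to the node id
 * and node_states_clear_node() below will clear the corresponding node
 * states.  If other pages remain on the node, the fields stay at -1 and
 * nothing is cleared.
 */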
1562
1563static void node_states_clear_node(int node, struct memory_notify *arg)
1564{
1565        if (arg->status_change_nid_normal >= 0)
1566                node_clear_state(node, N_NORMAL_MEMORY);
1567
1568        if ((N_MEMORY != N_NORMAL_MEMORY) &&
1569            (arg->status_change_nid_high >= 0))
1570                node_clear_state(node, N_HIGH_MEMORY);
1571
1572        if ((N_MEMORY != N_HIGH_MEMORY) &&
1573            (arg->status_change_nid >= 0))
1574                node_clear_state(node, N_MEMORY);
1575}
1576
1577static int __ref __offline_pages(unsigned long start_pfn,
1578                  unsigned long end_pfn)
1579{
1580        unsigned long pfn, nr_pages;
1581        long offlined_pages;
1582        int ret, node;
1583        unsigned long flags;
1584        unsigned long valid_start, valid_end;
1585        struct zone *zone;
1586        struct memory_notify arg;
1587
1588        /* At a minimum, pageblock alignment is necessary */
1589        if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
1590                return -EINVAL;
1591        if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
1592                return -EINVAL;
1593        /* Offlining only a range that lies within a single zone makes
1594           hotplug much easier and more readable; we assume this for now. */
1595        if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
1596                return -EINVAL;
1597
1598        zone = page_zone(pfn_to_page(valid_start));
1599        node = zone_to_nid(zone);
1600        nr_pages = end_pfn - start_pfn;
1601
1602        /* set above range as isolated */
1603        ret = start_isolate_page_range(start_pfn, end_pfn,
1604                                       MIGRATE_MOVABLE, true);
1605        if (ret)
1606                return ret;
1607
1608        arg.start_pfn = start_pfn;
1609        arg.nr_pages = nr_pages;
1610        node_states_check_changes_offline(nr_pages, zone, &arg);
1611
1612        ret = memory_notify(MEM_GOING_OFFLINE, &arg);
1613        ret = notifier_to_errno(ret);
1614        if (ret)
1615                goto failed_removal;
1616
1617        pfn = start_pfn;
1618repeat:
1619        /* start memory hot removal */
1620        ret = -EINTR;
1621        if (signal_pending(current))
1622                goto failed_removal;
1623
1624        cond_resched();
1625        lru_add_drain_all();
1626        drain_all_pages(zone);
1627
1628        pfn = scan_movable_pages(start_pfn, end_pfn);
1629        if (pfn) { /* We have movable pages */
1630                ret = do_migrate_range(pfn, end_pfn);
1631                goto repeat;
1632        }
1633
1634        /*
1635         * Dissolve free hugepages in the memory block before actually
1636         * offlining, to keep hugetlbfs's object counting consistent.
1637         */
1638        ret = dissolve_free_huge_pages(start_pfn, end_pfn);
1639        if (ret)
1640                goto failed_removal;
1641        /* check again */
1642        offlined_pages = check_pages_isolated(start_pfn, end_pfn);
1643        if (offlined_pages < 0)
1644                goto repeat;
1645        pr_info("Offlined Pages %ld\n", offlined_pages);
1646        /* Ok, all of our target pages are isolated.
1647           We cannot roll back from this point on. */
1648        offline_isolated_pages(start_pfn, end_pfn);
1649        /* reset pagetype flags and make the migratetype MOVABLE again */
1650        undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
1651        /* removal success */
1652        adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
1653        zone->present_pages -= offlined_pages;
1654
1655        pgdat_resize_lock(zone->zone_pgdat, &flags);
1656        zone->zone_pgdat->node_present_pages -= offlined_pages;
1657        pgdat_resize_unlock(zone->zone_pgdat, &flags);
1658
1659        init_per_zone_wmark_min();
1660
1661        if (!populated_zone(zone)) {
1662                zone_pcp_reset(zone);
1663                build_all_zonelists(NULL);
1664        } else
1665                zone_pcp_update(zone);
1666
1667        node_states_clear_node(node, &arg);
1668        if (arg.status_change_nid >= 0) {
1669                kswapd_stop(node);
1670                kcompactd_stop(node);
1671        }
1672
1673        vm_total_pages = nr_free_pagecache_pages();
1674        writeback_set_ratelimit();
1675
1676        memory_notify(MEM_OFFLINE, &arg);
1677        return 0;
1678
1679failed_removal:
1680        pr_debug("memory offlining [mem %#010llx-%#010llx] failed\n",
1681                 (unsigned long long) start_pfn << PAGE_SHIFT,
1682                 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
1683        memory_notify(MEM_CANCEL_OFFLINE, &arg);
1684        /* push the pages back to the free area */
1685        undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
1686        return ret;
1687}
1688
1689/* Must be protected by mem_hotplug_begin() or a device_lock */
1690int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
1691{
1692        return __offline_pages(start_pfn, start_pfn + nr_pages);
1693}
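
/*
 * A minimal usage sketch, honouring the locking rule stated above; this
 * is an assumption-laden illustration, not part of the original source,
 * and start/size are hypothetical locals describing a pageblock-aligned
 * range, e.g. one memory block as reported by memory_block_size_bytes():
 *
 *	lock_device_hotplug();
 *	rc = offline_pages(PFN_DOWN(start), PFN_DOWN(size));
 *	unlock_device_hotplug();
 */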
1694#endif /* CONFIG_MEMORY_HOTREMOVE */
1695
1696/**
1697 * walk_memory_range - walks through all mem sections in [start_pfn, end_pfn)
1698 * @start_pfn: start pfn of the memory range
1699 * @end_pfn: end pfn of the memory range
1700 * @arg: argument passed to func
1701 * @func: callback for each memory section walked
1702 *
1703 * This function walks through all present mem sections in the range
1704 * [start_pfn, end_pfn) and calls func on each mem section.
1705 *
1706 * Returns the return value of func.
1707 */
1708int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
1709                void *arg, int (*func)(struct memory_block *, void *))
1710{
1711        struct memory_block *mem = NULL;
1712        struct mem_section *section;
1713        unsigned long pfn, section_nr;
1714        int ret;
1715
1716        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
1717                section_nr = pfn_to_section_nr(pfn);
1718                if (!present_section_nr(section_nr))
1719                        continue;
1720
1721                section = __nr_to_section(section_nr);
1722                /* same memblock? */
1723                if (mem)
1724                        if ((section_nr >= mem->start_section_nr) &&
1725                            (section_nr <= mem->end_section_nr))
1726                                continue;
1727
1728                mem = find_memory_block_hinted(section, mem);
1729                if (!mem)
1730                        continue;
1731
1732                ret = func(mem, arg);
1733                if (ret) {
1734                        kobject_put(&mem->dev.kobj);
1735                        return ret;
1736                }
1737        }
1738
1739        if (mem)
1740                kobject_put(&mem->dev.kobj);
1741
1742        return 0;
1743}
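1744
/*
 * An illustrative callback sketch (made-up name, not in the original
 * source); any nonzero return value aborts the walk.  Counting the
 * memory blocks that cover a range could look like:
 *
 *	static int count_blocks_cb(struct memory_block *mem, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return 0;
 *	}
 *
 *	int nr_blocks = 0;
 *	walk_memory_range(start_pfn, end_pfn, &nr_blocks,
 *			  count_blocks_cb);
 */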
1744
1745#ifdef CONFIG_MEMORY_HOTREMOVE
1746static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
1747{
1748        int ret = !is_memblock_offlined(mem);
1749
1750        if (unlikely(ret)) {
1751                phys_addr_t beginpa, endpa;
1752
1753                beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
1754                endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1;
1755                pr_warn("removing memory failed, because memory [%pa-%pa] is online\n",
1756                        &beginpa, &endpa);
1757        }
1758
1759        return ret;
1760}
1761
1762static int check_cpu_on_node(pg_data_t *pgdat)
1763{
1764        int cpu;
1765
1766        for_each_present_cpu(cpu) {
1767                if (cpu_to_node(cpu) == pgdat->node_id)
1768                        /*
1769                         * A cpu on this node has not been removed, so
1770                         * we can't offline this node.
1771                         */
1772                        return -EBUSY;
1773        }
1774
1775        return 0;
1776}
1777
1778static void unmap_cpu_on_node(pg_data_t *pgdat)
1779{
1780#ifdef CONFIG_ACPI_NUMA
1781        int cpu;
1782
1783        for_each_possible_cpu(cpu)
1784                if (cpu_to_node(cpu) == pgdat->node_id)
1785                        numa_clear_node(cpu);
1786#endif
1787}
1788
1789static int check_and_unmap_cpu_on_node(pg_data_t *pgdat)
1790{
1791        int ret;
1792
1793        ret = check_cpu_on_node(pgdat);
1794        if (ret)
1795                return ret;
1796
1797        /*
1798         * The node is about to be offlined when we get here, so we
1799         * can clear cpu_to_node() now.
1800         */
1801
1802        unmap_cpu_on_node(pgdat);
1803        return 0;
1804}
1805
1806/**
1807 * try_offline_node
1808 * @nid: the node ID
1809 *
1810 * Offline a node if all memory sections and cpus of the node are removed.
1811 *
1812 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
1813 * and online/offline operations before this call.
1814 */
1815void try_offline_node(int nid)
1816{
1817        pg_data_t *pgdat = NODE_DATA(nid);
1818        unsigned long start_pfn = pgdat->node_start_pfn;
1819        unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
1820        unsigned long pfn;
1821
1822        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
1823                unsigned long section_nr = pfn_to_section_nr(pfn);
1824
1825                if (!present_section_nr(section_nr))
1826                        continue;
1827
1828                if (pfn_to_nid(pfn) != nid)
1829                        continue;
1830
1831                /*
1832                 * Some memory sections of this node have not been
1833                 * removed, so we can't offline the node now.
1834                 */
1835                return;
1836        }
1837
1838        if (check_and_unmap_cpu_on_node(pgdat))
1839                return;
1840
1841        /*
1842         * All memory and cpus of this node have been removed, so we
1843         * can offline the node now.
1844         */
1845        node_set_offline(nid);
1846        unregister_one_node(nid);
1847}
1848EXPORT_SYMBOL(try_offline_node);
1849
1850/**
1851 * remove_memory
1852 * @nid: the node ID
1853 * @start: physical address of the region to remove
1854 * @size: size of the region to remove
1855 *
1856 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
1857 * and online/offline operations before this call, as required by
1858 * try_offline_node().
1859 */
1860void __ref remove_memory(int nid, u64 start, u64 size)
1861{
1862        int ret;
1863
1864        BUG_ON(check_hotplug_memory_range(start, size));
1865
1866        mem_hotplug_begin();
1867
1868        /*
1869         * All memory blocks must be offlined before removing memory.  Check
1870         * whether all memory blocks in question are offline and trigger a BUG()
1871         * if this is not the case.
1872         */
1873        ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL,
1874                                check_memblock_offlined_cb);
1875        if (ret)
1876                BUG();
1877
1878        /* remove memmap entry */
1879        firmware_map_remove(start, start + size, "System RAM");
1880        memblock_free(start, size);
1881        memblock_remove(start, size);
1882
1883        arch_remove_memory(start, size, NULL);
1884
1885        try_offline_node(nid);
1886
1887        mem_hotplug_done();
1888}
1889EXPORT_SYMBOL_GPL(remove_memory);
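
/*
 * A hedged usage sketch (not from the original source): a hotplug
 * driver tearing down a previously added region might, under the lock
 * required above and after all memory blocks in the range have been
 * offlined, do:
 *
 *	lock_device_hotplug();
 *	remove_memory(nid, start, size);
 *	unlock_device_hotplug();
 */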
1890#endif /* CONFIG_MEMORY_HOTREMOVE */
1891